From 9e0c209edc0570e063d67f2ba6783c1cc6eb4dfb Mon Sep 17 00:00:00 2001
From: Sylvestre Ledru
Date: Fri, 11 Nov 2016 13:34:06 +0100
Subject: [PATCH] New upstream version 1.13.0+dfsg1

---
 CONTRIBUTING.md | 15 +- README.md | 4 +- RELEASES.md | 291 +- configure | 38 +- man/rustc.1 | 2 +- man/rustdoc.1 | 2 +- mk/cfg/i686-unknown-haiku.mk | 27 + mk/cfg/mips-unknown-linux-uclibc.mk | 1 + mk/cfg/mips64-unknown-linux-gnuabi64.mk | 1 + mk/cfg/mips64el-unknown-linux-gnuabi64.mk | 1 + mk/cfg/mipsel-unknown-linux-uclibc.mk | 1 + mk/cfg/s390x-unknown-linux-gnu.mk | 24 + mk/cfg/x86_64-unknown-haiku.mk | 27 + mk/clean.mk | 1 - mk/crates.mk | 40 +- mk/dist.mk | 1 + mk/docs.mk | 9 +- mk/install.mk | 3 +- mk/main.mk | 51 +- mk/platform.mk | 2 - mk/rt.mk | 338 +- mk/target.mk | 21 - mk/tests.mk | 3 +- src/bootstrap/Cargo.lock | 21 + src/bootstrap/bin/rustc.rs | 16 +- src/bootstrap/bin/rustdoc.rs | 8 +- src/bootstrap/bootstrap.py | 19 +- src/bootstrap/check.rs | 6 +- src/bootstrap/clean.rs | 1 - src/bootstrap/compile.rs | 94 +- src/bootstrap/config.rs | 11 + src/bootstrap/config.toml.example | 17 +- src/bootstrap/dist.rs | 8 + src/bootstrap/lib.rs | 158 +- src/bootstrap/mk/Makefile.in | 4 + src/bootstrap/native.rs | 399 +-- src/bootstrap/sanity.rs | 17 +- src/bootstrap/step.rs | 15 +- src/bootstrap/util.rs | 2 +- src/compiler-rt/lib/builtins/floatsidf.c | 13 +- src/compiler-rt/lib/builtins/floatsisf.c | 13 +- src/compiler-rt/lib/builtins/int_lib.h | 2 +- src/doc/book/associated-types.md | 2 +- src/doc/book/borrow-and-asref.md | 4 +- src/doc/book/closures.md | 8 +- src/doc/book/compiler-plugins.md | 6 +- src/doc/book/error-handling.md | 8 +- src/doc/book/ffi.md | 3 +- src/doc/book/getting-started.md | 11 +- src/doc/book/lang-items.md | 8 +- src/doc/book/macros.md | 4 +- src/doc/book/nightly-rust.md | 2 +- src/doc/book/no-stdlib.md | 33 +- src/doc/book/ownership.md | 4 +- src/doc/book/references-and-borrowing.md | 2 +- src/doc/book/traits.md | 34 +- src/doc/grammar.md | 5 + src/doc/nomicon/ownership.md | 2 +- src/doc/nomicon/safe-unsafe-meaning.md | 4 - src/doc/reference.md | 27 +- src/doc/rust.css | 2 +- src/doc/style/README.md | 64 - src/doc/style/SUMMARY.md | 50 - src/doc/style/errors/README.md | 3 - src/doc/style/errors/ergonomics.md | 66 - src/doc/style/errors/handling.md | 7 - src/doc/style/errors/propagation.md | 8 - src/doc/style/errors/signaling.md | 125 - src/doc/style/features/README.md | 9 - src/doc/style/features/crates.md | 6 - .../features/functions-and-methods/README.md | 44 - .../functions-and-methods/convenience.md | 43 - .../features/functions-and-methods/input.md | 203 -- .../features/functions-and-methods/output.md | 56 - src/doc/style/features/let.md | 103 - src/doc/style/features/loops.md | 13 - src/doc/style/features/match.md | 26 - src/doc/style/features/modules.md | 133 - src/doc/style/features/traits/README.md | 22 - src/doc/style/features/traits/common.md | 71 - src/doc/style/features/traits/extensions.md | 7 - src/doc/style/features/traits/generics.md | 67 - src/doc/style/features/traits/objects.md | 49 - src/doc/style/features/traits/overloading.md | 7 - src/doc/style/features/traits/reuse.md | 30 - src/doc/style/features/types/README.md | 68 - src/doc/style/features/types/conversions.md | 22 - src/doc/style/features/types/newtype.md | 69 - src/doc/style/ownership/README.md | 3 - src/doc/style/ownership/builders.md | 176 - src/doc/style/ownership/cell-smart.md | 4 - src/doc/style/ownership/constructors.md | 62 - src/doc/style/ownership/destructors.md | 22 -
src/doc/style/ownership/raii.md | 12 - src/doc/style/platform.md | 7 - src/doc/style/safety/README.md | 19 - src/doc/style/safety/lib-guarantees.md | 81 - src/doc/style/safety/unsafe.md | 22 - src/doc/style/style/README.md | 5 - src/doc/style/style/braces.md | 77 - src/doc/style/style/comments.md | 122 - src/doc/style/style/features.md | 13 - src/doc/style/style/imports.md | 50 - src/doc/style/style/naming/README.md | 115 - src/doc/style/style/naming/containers.md | 69 - src/doc/style/style/naming/conversions.md | 32 - src/doc/style/style/naming/iterators.md | 32 - src/doc/style/style/naming/ownership.md | 34 - src/doc/style/style/optional.md | 3 - src/doc/style/style/organization.md | 14 - src/doc/style/style/whitespace.md | 133 - src/doc/style/testing/README.md | 5 - src/doc/style/testing/unit.md | 30 - src/doc/style/todo.md | 5 - src/etc/CONFIGS.md | 1 + src/etc/debugger_pretty_printers_common.py | 17 + src/etc/gdb_rust_pretty_printing.py | 24 +- src/etc/lldb_batchmode.py | 6 +- src/etc/lldb_rust_formatters.py | 31 +- src/etc/local_stage0.sh | 2 +- src/etc/platform-intrinsics/generator.py | 31 +- src/etc/test-float-parse/runtests.py | 2 - src/etc/unicode.py | 24 +- src/liballoc/arc.rs | 64 +- src/liballoc/boxed.rs | 18 +- src/liballoc/lib.rs | 4 +- src/liballoc/raw_vec.rs | 11 +- src/liballoc/rc.rs | 551 +++- src/liballoc_jemalloc/build.rs | 6 +- src/liballoc_jemalloc/lib.rs | 4 +- src/liballoc_system/lib.rs | 4 +- src/libarena/lib.rs | 96 +- src/libcollections/binary_heap.rs | 61 +- src/libcollections/borrow.rs | 8 +- src/libcollections/btree/map.rs | 40 +- src/libcollections/btree/set.rs | 22 +- src/libcollections/enum_set.rs | 5 +- src/libcollections/fmt.rs | 14 +- src/libcollections/lib.rs | 3 +- src/libcollections/linked_list.rs | 16 +- src/libcollections/range.rs | 1 - src/libcollections/slice.rs | 2 +- src/libcollections/str.rs | 47 + src/libcollections/string.rs | 8 +- src/libcollections/vec.rs | 96 +- src/libcollections/vec_deque.rs | 36 +- src/libcollectionstest/binary_heap.rs | 3 + src/libcollectionstest/btree/set.rs | 30 +- src/libcollectionstest/lib.rs | 7 +- src/libcollectionstest/slice.rs | 18 + src/libcollectionstest/str.rs | 14 + src/libcollectionstest/vec.rs | 8 + src/libcollectionstest/vec_deque.rs | 6 + src/libcompiler_builtins/Cargo.toml | 15 + src/libcompiler_builtins/build.rs | 404 +++ src/libcompiler_builtins/lib.rs | 19 + src/libcore/any.rs | 23 +- src/libcore/array.rs | 11 +- src/libcore/borrow.rs | 2 - src/libcore/cell.rs | 125 +- src/libcore/char.rs | 173 +- src/libcore/char_private.rs | 2 - src/libcore/clone.rs | 46 +- src/libcore/cmp.rs | 69 +- src/libcore/convert.rs | 74 +- src/libcore/default.rs | 3 - src/libcore/fmt/builders.rs | 1 - src/libcore/fmt/mod.rs | 44 +- src/libcore/fmt/num.rs | 2 - src/libcore/fmt/rt/v1.rs | 2 +- src/libcore/hash/mod.rs | 6 +- src/libcore/hash/sip.rs | 13 +- src/libcore/intrinsics.rs | 76 +- src/libcore/iter/iterator.rs | 70 +- src/libcore/iter/mod.rs | 133 +- src/libcore/iter/range.rs | 69 +- src/libcore/iter/sources.rs | 14 +- src/libcore/iter/traits.rs | 39 +- src/libcore/iter_private.rs | 2 - src/libcore/lib.rs | 16 +- src/libcore/macros.rs | 99 +- src/libcore/marker.rs | 424 ++- src/libcore/mem.rs | 366 ++- src/libcore/nonzero.rs | 1 - src/libcore/num/bignum.rs | 7 +- src/libcore/num/dec2flt/algorithm.rs | 2 - src/libcore/num/dec2flt/mod.rs | 5 +- src/libcore/num/dec2flt/num.rs | 1 - src/libcore/num/dec2flt/parse.rs | 1 - src/libcore/num/dec2flt/rawfp.rs | 1 - src/libcore/num/flt2dec/decoder.rs | 6 +- 
src/libcore/num/flt2dec/mod.rs | 1 - src/libcore/num/flt2dec/strategy/dragon.rs | 2 - src/libcore/num/flt2dec/strategy/grisu.rs | 2 - src/libcore/num/mod.rs | 54 +- src/libcore/ops.rs | 1099 +++++-- src/libcore/option.rs | 110 +- src/libcore/ptr.rs | 66 +- src/libcore/result.rs | 117 +- src/libcore/slice.rs | 77 +- src/libcore/str/mod.rs | 49 +- src/libcore/str/pattern.rs | 2 - src/libcore/sync/atomic.rs | 150 +- src/libcore/tuple.rs | 4 - src/libcoretest/char.rs | 87 +- src/libcoretest/hash/sip.rs | 3 + src/libcoretest/iter.rs | 12 + src/libcoretest/lib.rs | 2 + src/libcoretest/ptr.rs | 12 +- src/libgetopts/lib.rs | 2 +- src/libgraphviz/lib.rs | 2 +- src/liblibc/.travis.yml | 19 +- src/liblibc/Cargo.lock | 112 + src/liblibc/Cargo.toml | 5 +- src/liblibc/README.md | 2 + src/liblibc/appveyor.yml | 14 +- .../aarch64-unknown-linux-gnu/Dockerfile | 2 +- .../arm-unknown-linux-gnueabihf/Dockerfile | 2 +- .../docker/i686-unknown-linux-gnu/Dockerfile | 2 +- .../docker/i686-unknown-linux-musl/Dockerfile | 22 + .../docker/mips-unknown-linux-gnu/Dockerfile | 13 +- .../docker/mips-unknown-linux-musl/Dockerfile | 14 + .../mipsel-unknown-linux-musl/Dockerfile | 14 + .../powerpc-unknown-linux-gnu/Dockerfile | 10 + .../powerpc64-unknown-linux-gnu/Dockerfile | 11 + .../docker/x86_64-unknown-freebsd/Dockerfile | 4 +- .../x86_64-unknown-linux-gnu/Dockerfile | 2 +- .../x86_64-unknown-linux-musl/Dockerfile | 8 +- .../docker/x86_64-unknown-openbsd/Dockerfile | 7 +- src/liblibc/ci/run-docker.sh | 1 + src/liblibc/ci/run-qemu.sh | 5 +- src/liblibc/ci/run.sh | 57 +- src/liblibc/libc-test/Cargo.lock | 92 - src/liblibc/libc-test/Cargo.toml | 3 + src/liblibc/libc-test/build-generated.rs | 1 + src/liblibc/libc-test/build.rs | 23 +- .../libc-test/generate-files/Cargo.lock | 108 - src/liblibc/src/lib.rs | 10 +- src/liblibc/src/unix/bsd/apple/mod.rs | 136 +- .../src/unix/bsd/freebsdlike/dragonfly/mod.rs | 52 + .../src/unix/bsd/freebsdlike/freebsd/mod.rs | 55 + src/liblibc/src/unix/bsd/freebsdlike/mod.rs | 64 +- src/liblibc/src/unix/bsd/mod.rs | 42 +- .../bsd/{openbsdlike => netbsdlike}/mod.rs | 24 +- .../netbsd.rs => netbsdlike/netbsd/mod.rs} | 56 +- .../bsd/netbsdlike/netbsd/other/b32/mod.rs | 2 + .../bsd/netbsdlike/netbsd/other/b64/mod.rs | 2 + .../unix/bsd/netbsdlike/netbsd/other/mod.rs | 13 + .../unix/bsd/netbsdlike/openbsdlike/bitrig.rs | 75 + .../openbsdlike/mod.rs} | 120 +- .../bsd/netbsdlike/openbsdlike/openbsd.rs | 28 + .../src/unix/bsd/openbsdlike/bitrig.rs | 435 --- src/liblibc/src/unix/haiku/b32.rs | 2 + src/liblibc/src/unix/haiku/b64.rs | 2 + src/liblibc/src/unix/haiku/mod.rs | 748 +++++ src/liblibc/src/unix/mod.rs | 32 +- src/liblibc/src/unix/notbsd/android/b32.rs | 4 + src/liblibc/src/unix/notbsd/android/b64.rs | 4 + src/liblibc/src/unix/notbsd/android/mod.rs | 173 + src/liblibc/src/unix/notbsd/linux/mips.rs | 81 +- src/liblibc/src/unix/notbsd/linux/mips64.rs | 234 ++ src/liblibc/src/unix/notbsd/linux/mod.rs | 111 +- .../src/unix/notbsd/linux/musl/b32/arm.rs | 28 + .../src/unix/notbsd/linux/musl/b32/asmjs.rs | 28 + .../src/unix/notbsd/linux/musl/b32/mips.rs | 39 +- .../src/unix/notbsd/linux/musl/b32/mod.rs | 4 +- .../src/unix/notbsd/linux/musl/b32/x86.rs | 28 + .../src/unix/notbsd/linux/musl/b64/aarch64.rs | 2 + .../src/unix/notbsd/linux/musl/b64/mod.rs | 26 +- .../unix/notbsd/linux/musl/b64/powerpc64.rs | 2 + .../src/unix/notbsd/linux/musl/b64/x86_64.rs | 2 + src/liblibc/src/unix/notbsd/linux/musl/mod.rs | 75 +- .../src/unix/notbsd/linux/other/b32/arm.rs | 145 + .../src/unix/notbsd/linux/other/b32/mod.rs | 
24 - .../unix/notbsd/linux/other/b32/powerpc.rs | 142 + .../src/unix/notbsd/linux/other/b32/x86.rs | 143 + .../unix/notbsd/linux/other/b64/aarch64.rs | 97 + .../src/unix/notbsd/linux/other/b64/mod.rs | 14 + .../unix/notbsd/linux/other/b64/powerpc64.rs | 97 + .../src/unix/notbsd/linux/other/b64/x86_64.rs | 109 + .../src/unix/notbsd/linux/other/mod.rs | 189 +- src/liblibc/src/unix/notbsd/linux/s390x.rs | 679 ++++ src/liblibc/src/unix/notbsd/mod.rs | 91 +- src/liblibc/src/unix/solaris/mod.rs | 26 +- src/liblibc/src/windows.rs | 41 + src/libpanic_unwind/gcc.rs | 29 +- src/libpanic_unwind/seh.rs | 4 +- src/libpanic_unwind/seh64_gnu.rs | 15 - src/libproc_macro/Cargo.toml | 15 + src/libproc_macro/build.rs | 89 + src/libproc_macro/lib.rs | 137 + src/libproc_macro/parse.rs | 26 + src/libproc_macro/prelude.rs | 12 + src/libproc_macro/qquote.rs | 470 +++ src/librand/reseeding.rs | 2 + src/librbml/lib.rs | 1609 --------- src/librustc/Cargo.toml | 1 - src/librustc/cfg/construct.rs | 6 +- src/librustc/dep_graph/README.md | 31 + src/librustc/dep_graph/debug.rs | 7 + src/librustc/dep_graph/dep_node.rs | 29 +- src/librustc/dep_graph/dep_tracking_map.rs | 11 + src/librustc/dep_graph/graph.rs | 2 +- src/librustc/dep_graph/mod.rs | 1 + src/librustc/dep_graph/raii.rs | 1 + src/librustc/dep_graph/shadow.rs | 145 + src/librustc/dep_graph/thread.rs | 41 +- src/librustc/diagnostics.rs | 102 +- src/librustc/hir/check_attr.rs | 54 +- src/librustc/hir/def.rs | 44 +- src/librustc/hir/def_id.rs | 83 +- src/librustc/hir/fold.rs | 1115 ------- src/librustc/hir/intravisit.rs | 31 +- src/librustc/hir/lowering.rs | 218 +- src/librustc/hir/map/blocks.rs | 6 +- src/librustc/hir/map/collector.rs | 20 +- src/librustc/hir/map/def_collector.rs | 55 +- src/librustc/hir/map/definitions.rs | 49 +- src/librustc/hir/map/mod.rs | 141 +- src/librustc/hir/mod.rs | 85 +- src/librustc/hir/pat_util.rs | 6 +- src/librustc/hir/print.rs | 22 +- src/librustc/hir/svh.rs | 15 + src/librustc/infer/bivariate.rs | 3 +- src/librustc/infer/combine.rs | 4 +- src/librustc/infer/equate.rs | 3 +- src/librustc/infer/error_reporting.rs | 69 +- src/librustc/infer/freshen.rs | 16 +- src/librustc/infer/glb.rs | 3 +- src/librustc/infer/higher_ranked/mod.rs | 70 +- src/librustc/infer/lub.rs | 3 +- src/librustc/infer/mod.rs | 298 +- .../infer/region_inference/graphviz.rs | 29 +- src/librustc/infer/region_inference/mod.rs | 352 +- src/librustc/infer/resolve.rs | 16 +- src/librustc/infer/sub.rs | 3 +- src/librustc/infer/type_variable.rs | 2 +- src/librustc/lib.rs | 6 +- src/librustc/lint/builtin.rs | 21 +- src/librustc/lint/context.rs | 23 +- src/librustc/lint/mod.rs | 2 +- src/librustc/middle/cstore.rs | 418 +-- src/librustc/middle/dataflow.rs | 4 +- src/librustc/middle/dead.rs | 77 +- src/librustc/middle/dependency_format.rs | 39 +- src/librustc/middle/effect.rs | 76 +- src/librustc/middle/expr_use_visitor.rs | 99 +- src/librustc/middle/free_region.rs | 25 +- src/librustc/middle/intrinsicck.rs | 13 +- src/librustc/middle/lang_items.rs | 3 +- src/librustc/middle/liveness.rs | 15 +- src/librustc/middle/mem_categorization.rs | 176 +- src/librustc/middle/privacy.rs | 5 +- src/librustc/middle/reachable.rs | 24 +- src/librustc/middle/recursion_limit.rs | 1 - src/librustc/middle/region.rs | 24 +- src/librustc/middle/resolve_lifetime.rs | 96 +- src/librustc/middle/stability.rs | 91 +- src/librustc/middle/weak_lang_items.rs | 1 + src/librustc/mir/repr.rs | 107 +- src/librustc/mir/tcx.rs | 14 +- src/librustc/mir/transform.rs | 17 +- src/librustc/mir/visit.rs | 326 +- 
src/librustc/session/config.rs | 138 +- src/librustc/session/mod.rs | 167 +- src/librustc/traits/coherence.rs | 18 +- src/librustc/traits/error_reporting.rs | 213 +- src/librustc/traits/fulfill.rs | 55 +- src/librustc/traits/mod.rs | 90 +- src/librustc/traits/object_safety.rs | 31 +- src/librustc/traits/project.rs | 9 +- src/librustc/traits/select.rs | 195 +- src/librustc/traits/specialize/mod.rs | 123 +- .../traits/specialize/specialization_graph.rs | 82 +- src/librustc/traits/util.rs | 31 +- src/librustc/ty/_match.rs | 3 +- src/librustc/ty/adjustment.rs | 8 +- src/librustc/ty/cast.rs | 2 +- src/librustc/ty/contents.rs | 4 +- src/librustc/ty/context.rs | 231 +- src/librustc/ty/error.rs | 28 +- src/librustc/ty/fast_reject.rs | 14 +- src/librustc/ty/flags.rs | 42 +- src/librustc/ty/fold.rs | 93 +- src/librustc/ty/item_path.rs | 28 +- src/librustc/ty/layout.rs | 283 +- src/librustc/ty/maps.rs | 12 +- src/librustc/ty/mod.rs | 822 +++-- src/librustc/ty/outlives.rs | 7 +- src/librustc/ty/relate.rs | 208 +- src/librustc/ty/structural_impls.rs | 245 +- src/librustc/ty/sty.rs | 339 +- src/librustc/ty/subst.rs | 763 ++--- src/librustc/ty/trait_def.rs | 19 +- src/librustc/ty/util.rs | 311 +- src/librustc/ty/walk.rs | 46 +- src/librustc/ty/wf.rs | 70 +- src/librustc/util/common.rs | 35 +- src/librustc/util/fs.rs | 43 +- src/librustc/util/ppaux.rs | 387 +-- src/librustc_back/lib.rs | 3 +- src/librustc_back/target/aarch64_apple_ios.rs | 2 +- src/librustc_back/target/apple_ios_base.rs | 4 +- src/librustc_back/target/armv7_apple_ios.rs | 2 +- .../target/armv7_unknown_linux_gnueabihf.rs | 5 +- src/librustc_back/target/armv7s_apple_ios.rs | 2 +- .../target/asmjs_unknown_emscripten.rs | 1 - src/librustc_back/target/haiku_base.rs | 23 + src/librustc_back/target/i386_apple_ios.rs | 2 +- .../target/i586_pc_windows_msvc.rs | 2 +- .../target/i586_unknown_linux_gnu.rs | 2 +- .../target/i686_unknown_haiku.rs | 30 + src/librustc_back/target/le32_unknown_nacl.rs | 1 - .../target/mips64_unknown_linux_gnuabi64.rs | 31 + .../target/mips64el_unknown_linux_gnuabi64.rs | 31 + .../target/mips_unknown_linux_uclibc.rs | 30 + .../target/mipsel_unknown_linux_uclibc.rs | 31 + src/librustc_back/target/mod.rs | 31 +- .../target/s390x_unknown_linux_gnu.rs | 34 + src/librustc_back/target/x86_64_apple_ios.rs | 2 +- .../target/x86_64_unknown_haiku.rs | 30 + src/librustc_bitflags/lib.rs | 7 +- src/librustc_borrowck/borrowck/check_loans.rs | 17 +- src/librustc_borrowck/borrowck/fragments.rs | 104 +- .../borrowck/gather_loans/gather_moves.rs | 9 +- .../borrowck/gather_loans/lifetime.rs | 35 +- .../borrowck/gather_loans/mod.rs | 10 +- .../borrowck/gather_loans/move_error.rs | 35 +- .../borrowck/gather_loans/restrictions.rs | 36 +- .../borrowck/mir/dataflow/impls.rs | 69 +- .../borrowck/mir/dataflow/sanity_check.rs | 27 +- .../borrowck/mir/elaborate_drops.rs | 193 +- .../borrowck/mir/gather_moves.rs | 913 ++---- src/librustc_borrowck/borrowck/mir/mod.rs | 95 +- src/librustc_borrowck/borrowck/mir/patch.rs | 9 +- src/librustc_borrowck/borrowck/mod.rs | 167 +- src/librustc_borrowck/borrowck/move_data.rs | 70 +- src/librustc_borrowck/lib.rs | 3 +- src/librustc_const_eval/check_match.rs | 222 +- src/librustc_const_eval/eval.rs | 116 +- src/librustc_const_eval/lib.rs | 4 +- src/librustc_const_math/lib.rs | 2 +- .../flock.rs | 162 +- src/librustc_data_structures/fnv.rs | 7 + src/librustc_data_structures/lib.rs | 4 + src/librustc_driver/Cargo.toml | 3 +- src/librustc_driver/derive_registrar.rs | 37 + src/librustc_driver/driver.rs | 159 +- 
src/librustc_driver/lib.rs | 38 +- src/librustc_driver/pretty.rs | 5 +- src/librustc_driver/test.rs | 79 +- src/librustc_errors/emitter.rs | 271 +- src/librustc_errors/lib.rs | 38 +- src/librustc_errors/lock.rs | 112 + src/librustc_incremental/Cargo.toml | 3 +- src/librustc_incremental/assert_dep_graph.rs | 162 +- .../calculate_svh/caching_codemap_view.rs | 115 + .../calculate_svh/def_path_hash.rs | 36 + src/librustc_incremental/calculate_svh/mod.rs | 233 +- .../calculate_svh/svh_visitor.rs | 510 ++- src/librustc_incremental/lib.rs | 17 +- src/librustc_incremental/persist/data.rs | 13 + src/librustc_incremental/persist/directory.rs | 17 +- .../persist/dirty_clean.rs | 193 +- .../persist/file_format.rs | 122 + src/librustc_incremental/persist/fs.rs | 1058 ++++++ src/librustc_incremental/persist/hash.rs | 131 +- src/librustc_incremental/persist/load.rs | 159 +- src/librustc_incremental/persist/mod.rs | 6 +- src/librustc_incremental/persist/preds.rs | 2 +- src/librustc_incremental/persist/save.rs | 127 +- src/librustc_incremental/persist/util.rs | 95 - .../persist/work_product.rs | 4 +- src/librustc_lint/bad_style.rs | 8 +- src/librustc_lint/builtin.rs | 113 +- src/librustc_lint/lib.rs | 16 +- src/librustc_lint/types.rs | 177 +- src/librustc_lint/unused.rs | 37 +- src/librustc_llvm/build.rs | 8 +- src/librustc_llvm/ffi.rs | 34 +- src/librustc_llvm/lib.rs | 6 + src/{librbml => librustc_macro}/Cargo.toml | 7 +- src/librustc_macro/lib.rs | 169 + src/librustc_metadata/Cargo.toml | 6 +- src/librustc_metadata/astencode.rs | 1475 +-------- src/librustc_metadata/common.rs | 258 -- src/librustc_metadata/creader.rs | 596 ++-- src/librustc_metadata/csearch.rs | 454 +-- src/librustc_metadata/cstore.rs | 153 +- src/librustc_metadata/decoder.rs | 2379 +++++--------- src/librustc_metadata/def_key.rs | 110 - src/librustc_metadata/diagnostics.rs | 200 +- src/librustc_metadata/encoder.rs | 2920 +++++++---------- src/librustc_metadata/index.rs | 139 +- src/librustc_metadata/index_builder.rs | 224 ++ src/librustc_metadata/lib.rs | 38 +- src/librustc_metadata/loader.rs | 94 +- src/librustc_metadata/macro_import.rs | 143 +- src/librustc_metadata/macros.rs | 46 - src/librustc_metadata/schema.rs | 349 ++ src/librustc_metadata/tls_context.rs | 102 - src/librustc_metadata/tydecode.rs | 737 ----- src/librustc_metadata/tyencode.rs | 518 --- src/librustc_mir/build/cfg.rs | 2 +- src/librustc_mir/build/expr/as_rvalue.rs | 9 +- src/librustc_mir/build/matches/mod.rs | 2 +- src/librustc_mir/build/matches/test.rs | 2 +- src/librustc_mir/build/mod.rs | 15 +- src/librustc_mir/build/scope.rs | 9 +- src/librustc_mir/def_use.rs | 197 ++ src/librustc_mir/diagnostics.rs | 25 +- src/librustc_mir/hair/cx/expr.rs | 125 +- src/librustc_mir/hair/cx/mod.rs | 13 +- src/librustc_mir/hair/cx/pattern.rs | 19 +- src/librustc_mir/hair/mod.rs | 8 +- src/librustc_mir/lib.rs | 5 +- src/librustc_mir/pretty.rs | 14 +- src/librustc_mir/transform/copy_prop.rs | 334 ++ src/librustc_mir/transform/deaggregator.rs | 4 +- src/librustc_mir/transform/dump_mir.rs | 4 +- src/librustc_mir/transform/instcombine.rs | 110 + src/librustc_mir/transform/mod.rs | 2 + src/librustc_mir/transform/no_landing_pads.rs | 7 +- src/librustc_mir/transform/promote_consts.rs | 48 +- src/librustc_mir/transform/qualify_consts.rs | 159 +- .../transform/simplify_branches.rs | 2 +- src/librustc_mir/transform/simplify_cfg.rs | 2 +- src/librustc_mir/transform/type_check.rs | 46 +- src/librustc_passes/ast_validation.rs | 48 +- src/librustc_passes/consts.rs | 28 +- 
src/librustc_passes/diagnostics.rs | 7 + src/librustc_passes/lib.rs | 1 + src/librustc_passes/loops.rs | 2 +- src/librustc_passes/rvalues.rs | 2 +- src/librustc_passes/static_recursion.rs | 30 +- src/librustc_plugin/lib.rs | 2 +- src/librustc_plugin/load.rs | 19 +- src/librustc_plugin/registry.rs | 25 +- src/librustc_privacy/lib.rs | 136 +- src/librustc_resolve/assign_ids.rs | 92 - src/librustc_resolve/build_reduced_graph.rs | 277 +- src/librustc_resolve/check_unused.rs | 4 +- src/librustc_resolve/diagnostics.rs | 40 +- src/librustc_resolve/lib.rs | 887 ++--- src/librustc_resolve/macros.rs | 262 ++ src/librustc_resolve/resolve_imports.rs | 715 ++-- src/librustc_save_analysis/data.rs | 70 +- src/librustc_save_analysis/dump_visitor.rs | 322 +- src/librustc_save_analysis/external_data.rs | 75 +- src/librustc_save_analysis/json_api_dumper.rs | 415 +++ src/librustc_save_analysis/json_dumper.rs | 19 +- src/librustc_save_analysis/lib.rs | 116 +- src/librustc_trans/_match.rs | 2012 ------------ src/librustc_trans/abi.rs | 38 +- src/librustc_trans/adt.rs | 1386 ++------ src/librustc_trans/asm.rs | 19 +- src/librustc_trans/assert_module_sources.rs | 1 - src/librustc_trans/back/link.rs | 110 +- src/librustc_trans/back/linker.rs | 90 +- src/librustc_trans/back/lto.rs | 7 +- .../back}/rpath.rs | 5 +- src/librustc_trans/back/symbol_names.rs | 128 +- src/librustc_trans/back/write.rs | 34 +- src/librustc_trans/base.rs | 1215 +------ src/librustc_trans/cabi_mips64.rs | 168 + src/librustc_trans/cabi_powerpc.rs | 12 +- src/librustc_trans/cabi_s390x.rs | 150 + src/librustc_trans/cabi_x86_64.rs | 2 +- src/librustc_trans/callee.rs | 677 +--- src/librustc_trans/cleanup.rs | 688 +--- src/librustc_trans/closure.rs | 283 +- src/librustc_trans/collector.rs | 251 +- src/librustc_trans/common.rs | 286 +- src/librustc_trans/consts.rs | 1002 +----- src/librustc_trans/context.rs | 105 +- src/librustc_trans/controlflow.rs | 434 --- src/librustc_trans/datum.rs | 828 ----- .../debuginfo/create_scope_map.rs | 485 +-- src/librustc_trans/debuginfo/gdb.rs | 2 +- src/librustc_trans/debuginfo/metadata.rs | 523 ++- src/librustc_trans/debuginfo/mod.rs | 142 +- src/librustc_trans/debuginfo/source_loc.rs | 100 +- src/librustc_trans/debuginfo/type_names.rs | 19 +- src/librustc_trans/debuginfo/utils.rs | 11 +- src/librustc_trans/declare.rs | 5 +- src/librustc_trans/diagnostics.rs | 6 +- src/librustc_trans/expr.rs | 2473 -------------- src/librustc_trans/glue.rs | 413 ++- src/librustc_trans/inline.rs | 52 - src/librustc_trans/intrinsic.rs | 366 +-- src/librustc_trans/lib.rs | 12 +- src/librustc_trans/machine.rs | 29 - src/librustc_trans/meth.rs | 253 +- src/librustc_trans/mir/analyze.rs | 25 +- src/librustc_trans/mir/block.rs | 61 +- src/librustc_trans/mir/constant.rs | 158 +- src/librustc_trans/mir/lvalue.rs | 5 +- src/librustc_trans/mir/mod.rs | 143 +- src/librustc_trans/mir/rvalue.rs | 28 +- src/librustc_trans/mir/statement.rs | 4 +- src/librustc_trans/monomorphize.rs | 204 +- src/librustc_trans/partitioning.rs | 132 +- src/librustc_trans/symbol_names_test.rs | 1 - src/librustc_trans/trans_item.rs | 171 +- src/librustc_trans/tvec.rs | 398 +-- src/librustc_trans/type_.rs | 21 + src/librustc_trans/type_of.rs | 179 +- src/librustc_typeck/astconv.rs | 1036 +++--- src/librustc_typeck/check/_match.rs | 202 +- src/librustc_typeck/check/autoderef.rs | 2 +- src/librustc_typeck/check/callee.rs | 43 +- src/librustc_typeck/check/cast.rs | 11 +- src/librustc_typeck/check/closure.rs | 25 +- src/librustc_typeck/check/coercion.rs | 21 +- 
src/librustc_typeck/check/compare_method.rs | 247 +- src/librustc_typeck/check/demand.rs | 6 +- src/librustc_typeck/check/dropck.rs | 59 +- src/librustc_typeck/check/intrinsic.rs | 54 +- src/librustc_typeck/check/method/confirm.rs | 138 +- src/librustc_typeck/check/method/mod.rs | 71 +- src/librustc_typeck/check/method/probe.rs | 170 +- src/librustc_typeck/check/method/suggest.rs | 102 +- src/librustc_typeck/check/mod.rs | 1453 ++++---- src/librustc_typeck/check/op.rs | 26 +- src/librustc_typeck/check/regionck.rs | 127 +- src/librustc_typeck/check/upvar.rs | 62 +- src/librustc_typeck/check/wfcheck.rs | 229 +- src/librustc_typeck/check/writeback.rs | 64 +- src/librustc_typeck/check_unused.rs | 2 +- src/librustc_typeck/coherence/mod.rs | 115 +- src/librustc_typeck/coherence/orphan.rs | 39 +- src/librustc_typeck/coherence/overlap.rs | 30 +- src/librustc_typeck/coherence/unsafety.rs | 2 +- src/librustc_typeck/collect.rs | 1153 +++---- .../constrained_type_params.rs | 34 +- src/librustc_typeck/diagnostics.rs | 55 +- src/librustc_typeck/lib.rs | 27 +- src/librustc_typeck/rscope.rs | 76 +- src/librustc_typeck/variance/constraints.rs | 208 +- src/librustc_typeck/variance/mod.rs | 1 - src/librustc_typeck/variance/solve.rs | 74 +- src/librustc_typeck/variance/terms.rs | 135 +- src/librustc_typeck/variance/xform.rs | 6 +- src/librustc_unicode/char.rs | 8 + src/librustc_unicode/lib.rs | 2 + src/librustc_unicode/u_str.rs | 9 +- src/librustdoc/Cargo.toml | 1 + src/librustdoc/clean/inline.rs | 202 +- src/librustdoc/clean/mod.rs | 692 ++-- src/librustdoc/clean/simplify.rs | 35 +- src/librustdoc/core.rs | 54 +- src/librustdoc/doctree.rs | 41 +- src/librustdoc/fold.rs | 7 + src/librustdoc/html/format.rs | 36 +- src/librustdoc/html/highlight.rs | 8 +- src/librustdoc/html/item_type.rs | 71 +- src/librustdoc/html/layout.rs | 6 +- src/librustdoc/html/markdown.rs | 20 +- src/librustdoc/html/render.rs | 668 ++-- .../html/static/jquery-2.1.4.min.js | 4 - src/librustdoc/html/static/main.js | 16 +- src/librustdoc/html/static/playpen.js | 15 +- src/librustdoc/html/static/rustdoc.css | 40 +- src/librustdoc/html/static/styles/main.css | 8 +- src/librustdoc/lib.rs | 55 +- src/librustdoc/passes.rs | 416 --- src/librustdoc/passes/collapse_docs.rs | 47 + src/librustdoc/passes/mod.rs | 204 ++ src/librustdoc/passes/strip_hidden.rs | 66 + src/librustdoc/passes/strip_priv_imports.rs | 18 + src/librustdoc/passes/strip_private.rs | 38 + src/librustdoc/passes/unindent_comments.rs | 168 + src/librustdoc/test.rs | 23 +- src/librustdoc/visit_ast.rs | 38 +- src/librustdoc/visit_lib.rs | 48 +- src/libserialize/collection_impls.rs | 8 +- src/libserialize/hex.rs | 2 +- src/libserialize/json.rs | 26 +- src/{librbml => libserialize}/leb128.rs | 0 src/libserialize/lib.rs | 13 +- src/{librbml => libserialize}/opaque.rs | 279 +- src/libserialize/serialize.rs | 335 +- src/libstd/Cargo.toml | 1 + src/libstd/ascii.rs | 7 +- src/libstd/build.rs | 8 +- src/libstd/collections/hash/map.rs | 126 +- src/libstd/collections/hash/set.rs | 59 +- src/libstd/collections/hash/table.rs | 20 +- src/libstd/env.rs | 30 +- src/libstd/error.rs | 44 +- src/libstd/ffi/c_str.rs | 75 +- src/libstd/ffi/os_str.rs | 38 +- src/libstd/fs.rs | 29 +- src/libstd/io/buffered.rs | 12 +- src/libstd/io/cursor.rs | 2 - src/libstd/io/error.rs | 10 +- src/libstd/io/impls.rs | 4 - src/libstd/io/lazy.rs | 2 - src/libstd/io/mod.rs | 41 +- src/libstd/io/stdio.rs | 1 - src/libstd/io/util.rs | 2 - src/libstd/lib.rs | 23 +- src/libstd/macros.rs | 14 +- src/libstd/net/addr.rs | 3 - 
src/libstd/net/ip.rs | 19 +- src/libstd/net/mod.rs | 4 +- src/libstd/net/parser.rs | 4 +- src/libstd/net/tcp.rs | 12 +- src/libstd/net/test.rs | 2 - src/libstd/net/udp.rs | 2 - src/libstd/num/mod.rs | 4 - src/libstd/os/haiku/fs.rs | 138 + .../os/haiku/mod.rs} | 12 +- src/libstd/os/haiku/raw.rs | 74 + src/libstd/os/linux/raw.rs | 10 + src/libstd/os/mod.rs | 1 + src/libstd/os/raw.rs | 6 +- src/libstd/panic.rs | 1 - src/libstd/panicking.rs | 31 +- src/libstd/path.rs | 278 +- src/libstd/primitive_docs.rs | 89 +- src/libstd/process.rs | 2 - src/libstd/rt.rs | 1 - src/libstd/rtdeps.rs | 4 + src/libstd/sync/barrier.rs | 2 - src/libstd/sync/condvar.rs | 19 +- src/libstd/sync/mpsc/blocking.rs | 2 - src/libstd/sync/mpsc/mod.rs | 9 +- src/libstd/sync/mpsc/mpsc_queue.rs | 2 - src/libstd/sync/mpsc/oneshot.rs | 2 +- src/libstd/sync/mpsc/select.rs | 4 +- src/libstd/sync/mpsc/shared.rs | 15 +- src/libstd/sync/mpsc/spsc_queue.rs | 2 - src/libstd/sync/mpsc/stream.rs | 3 +- src/libstd/sync/mpsc/sync.rs | 14 +- src/libstd/sync/mutex.rs | 5 +- src/libstd/sync/once.rs | 2 - src/libstd/sync/rwlock.rs | 7 +- src/libstd/sys/common/args.rs | 9 +- src/libstd/sys/common/at_exit_imp.rs | 2 - src/libstd/sys/common/backtrace.rs | 71 +- src/libstd/sys/common/condvar.rs | 7 + src/libstd/sys/common/io.rs | 3 - src/libstd/sys/common/mod.rs | 1 - src/libstd/sys/common/mutex.rs | 1 - src/libstd/sys/common/net.rs | 12 +- src/libstd/sys/common/poison.rs | 5 +- src/libstd/sys/common/remutex.rs | 3 - src/libstd/sys/common/thread.rs | 2 - src/libstd/sys/common/thread_info.rs | 1 - src/libstd/sys/common/thread_local.rs | 1 - src/libstd/sys/common/wtf8.rs | 3 - src/libstd/sys/unix/condvar.rs | 67 +- src/libstd/sys/unix/ext/ffi.rs | 1 - src/libstd/sys/unix/ext/net.rs | 2 - src/libstd/sys/unix/ext/process.rs | 2 - src/libstd/sys/unix/fd.rs | 16 +- src/libstd/sys/unix/fs.rs | 15 +- src/libstd/sys/unix/mod.rs | 1 + src/libstd/sys/unix/net.rs | 13 +- src/libstd/sys/unix/os.rs | 69 +- src/libstd/sys/unix/os_str.rs | 2 - src/libstd/sys/unix/pipe.rs | 2 - src/libstd/sys/unix/process.rs | 2 - src/libstd/sys/unix/rand.rs | 121 +- src/libstd/sys/unix/rwlock.rs | 4 +- src/libstd/sys/unix/stdio.rs | 2 - src/libstd/sys/unix/thread.rs | 11 +- src/libstd/sys/windows/compat.rs | 2 - src/libstd/sys/windows/condvar.rs | 3 + src/libstd/sys/windows/dynamic_lib.rs | 1 - src/libstd/sys/windows/fs.rs | 3 +- src/libstd/sys/windows/handle.rs | 2 - src/libstd/sys/windows/mod.rs | 2 - src/libstd/sys/windows/mutex.rs | 2 - src/libstd/sys/windows/net.rs | 2 - src/libstd/sys/windows/os.rs | 1 - src/libstd/sys/windows/os_str.rs | 3 - src/libstd/sys/windows/pipe.rs | 1 - src/libstd/sys/windows/process.rs | 3 - src/libstd/sys/windows/stdio.rs | 1 - src/libstd/sys/windows/thread.rs | 2 - src/libstd/sys/windows/thread_local.rs | 2 - src/libstd/thread/local.rs | 7 - src/libstd/thread/mod.rs | 59 +- src/libstd/time/duration.rs | 221 +- src/libstd/time/mod.rs | 12 + src/libsyntax/abi.rs | 4 + src/libsyntax/ast.rs | 166 +- src/libsyntax/attr.rs | 395 ++- src/libsyntax/codemap.rs | 101 +- src/libsyntax/config.rs | 234 +- src/libsyntax/diagnostic_list.rs | 18 + src/libsyntax/ext/base.rs | 472 +-- src/libsyntax/ext/build.rs | 77 +- src/libsyntax/ext/expand.rs | 1279 ++++---- src/libsyntax/ext/hygiene.rs | 15 +- src/libsyntax/ext/placeholders.rs | 239 ++ src/libsyntax/ext/proc_macro_shim.rs | 71 + src/libsyntax/ext/source_util.rs | 8 +- src/libsyntax/ext/tt/macro_parser.rs | 4 +- src/libsyntax/ext/tt/macro_rules.rs | 277 +- src/libsyntax/feature_gate.rs | 220 +- 
src/libsyntax/fold.rs | 172 +- src/libsyntax/lib.rs | 7 +- src/libsyntax/parse/attr.rs | 70 +- src/libsyntax/parse/lexer/mod.rs | 35 +- src/libsyntax/parse/mod.rs | 89 +- src/libsyntax/parse/obsolete.rs | 15 +- src/libsyntax/parse/parser.rs | 830 ++--- src/libsyntax/parse/token.rs | 41 +- src/libsyntax/print/pprust.rs | 56 +- src/libsyntax/ptr.rs | 21 +- src/libsyntax/test.rs | 48 +- src/libsyntax/tokenstream.rs | 13 + src/libsyntax/util/small_vector.rs | 6 + src/libsyntax/visit.rs | 19 +- src/libsyntax_ext/Cargo.toml | 3 +- src/libsyntax_ext/asm.rs | 80 +- src/libsyntax_ext/cfg.rs | 2 +- src/libsyntax_ext/concat.rs | 8 +- src/libsyntax_ext/concat_idents.rs | 36 +- src/libsyntax_ext/deriving/bounds.rs | 1 + src/libsyntax_ext/deriving/clone.rs | 164 +- src/libsyntax_ext/deriving/cmp/eq.rs | 56 +- src/libsyntax_ext/deriving/cmp/ord.rs | 3 +- src/libsyntax_ext/deriving/cmp/partial_eq.rs | 1 + src/libsyntax_ext/deriving/cmp/partial_ord.rs | 3 +- src/libsyntax_ext/deriving/custom.rs | 99 + src/libsyntax_ext/deriving/debug.rs | 3 +- src/libsyntax_ext/deriving/decodable.rs | 7 +- src/libsyntax_ext/deriving/default.rs | 5 +- src/libsyntax_ext/deriving/encodable.rs | 1 + src/libsyntax_ext/deriving/generic/mod.rs | 130 +- src/libsyntax_ext/deriving/generic/ty.rs | 135 +- src/libsyntax_ext/deriving/hash.rs | 3 +- src/libsyntax_ext/deriving/mod.rs | 315 +- src/libsyntax_ext/env.rs | 80 +- src/libsyntax_ext/format.rs | 172 +- src/libsyntax_ext/lib.rs | 101 +- src/libsyntax_ext/log_syntax.rs | 4 +- src/libsyntax_ext/rustc_macro_registrar.rs | 277 ++ src/libsyntax_ext/trace_macros.rs | 4 +- src/libsyntax_pos/lib.rs | 125 +- src/libterm/lib.rs | 2 +- src/libterm/terminfo/parm.rs | 2 +- src/libterm/terminfo/searcher.rs | 2 + src/libtest/lib.rs | 8 +- src/libunwind/build.rs | 2 +- src/libunwind/libunwind.rs | 7 + src/rt/rust_test_helpers.c | 21 + src/rtstartup/rsbegin.rs | 11 +- src/rtstartup/rsend.rs | 9 +- src/rust-installer/install-template.sh | 22 + src/rust-installer/test.sh | 50 + .../test/image-docdir1/share/doc/rust/README | 1 + .../image-docdir1/share/doc/rust/rustdocs.txt | 1 + .../test/image-docdir2/share/doc/cargo/README | 1 + .../share/doc/cargo/cargodocs.txt | 1 + src/rustc/Cargo.lock | 24 +- src/rustc/std_shim/Cargo.lock | 12 + src/rustc/std_shim/Cargo.toml | 1 + src/rustc/std_shim/lib.rs | 6 + src/rustllvm/ArchiveWrapper.cpp | 24 +- src/rustllvm/PassWrapper.cpp | 21 +- src/rustllvm/RustWrapper.cpp | 119 +- src/rustllvm/rustllvm.h | 11 +- src/stage0.txt | 5 +- .../instantiation-through-vtable.rs | 3 +- .../trait-method-default-impl.rs | 10 +- .../codegen-units/item-collection/unsizing.rs | 3 +- .../partitioning/local-inlining.rs | 2 +- .../partitioning/local-transitive-inlining.rs | 4 +- .../partitioning/vtable-through-const.rs | 3 +- src/test/codegen/abi-sysv64.rs | 24 + src/test/codegen/adjustments.rs | 8 +- src/test/codegen/coercions.rs | 3 - src/test/codegen/consts.rs | 20 +- src/test/codegen/drop.rs | 10 +- src/test/codegen/likely.rs | 41 + src/test/codegen/link_section.rs | 4 +- src/test/codegen/loads.rs | 4 +- src/test/codegen/mir_zst_stores.rs | 2 - src/test/codegen/naked-functions.rs | 6 +- src/test/codegen/refs.rs | 11 +- src/test/codegen/stores.rs | 11 +- .../auxiliary/macro_crate_MacroRulesTT.rs | 26 - .../auxiliary/macro_crate_test.rs | 21 +- .../lint-plugin-forbid-attrs.rs | 6 +- src/test/compile-fail-fulldeps/qquote.rs | 4 +- .../rustc-macro/append-impl.rs | 33 + .../rustc-macro/at-the-root.rs | 25 + .../rustc-macro/attribute.rs | 46 + .../rustc-macro/auxiliary/append-impl.rs | 
31 + .../rustc-macro/auxiliary/derive-a-2.rs | 25 + .../rustc-macro/auxiliary/derive-a.rs | 25 + .../rustc-macro/auxiliary/derive-bad.rs | 26 + .../rustc-macro/auxiliary/derive-panic.rs | 25 + .../auxiliary/derive-unstable-2.rs | 29 + .../rustc-macro/auxiliary/derive-unstable.rs | 26 + .../rustc-macro/cannot-link.rs | 16 + .../rustc-macro/define-two.rs | 28 + .../rustc-macro/derive-bad.rs | 25 + .../rustc-macro/derive-still-gated.rs | 22 + .../rustc-macro/expand-to-unstable-2.rs | 25 + .../rustc-macro/expand-to-unstable.rs | 25 + .../rustc-macro/export-macro.rs | 19 + .../rustc-macro/exports.rs | 22 + .../rustc-macro/feature-gate-1.rs | 13 + .../rustc-macro/feature-gate-2.rs | 13 + .../rustc-macro/feature-gate-3.rs | 15 + .../rustc-macro/feature-gate-4.rs | 15 + .../rustc-macro/feature-gate-5.rs | 12 + .../rustc-macro/import.rs | 22 + .../rustc-macro/load-panic.rs | 23 + .../require-rustc-macro-crate-type.rs | 21 + .../rustc-macro/shadow-builtin.rs | 22 + .../rustc-macro/shadow.rs | 21 + .../rustc-macro/signature.rs | 24 + .../rustc-macro/two-crate-types-1.rs | 14 + .../rustc-macro/two-crate-types-2.rs | 12 + src/test/compile-fail/E0005.rs | 1 + src/test/compile-fail/E0009.rs | 5 +- src/test/compile-fail/E0017.rs | 1 + src/test/compile-fail/E0030.rs | 4 +- src/test/compile-fail/E0033.rs | 13 +- src/test/compile-fail/E0034.rs | 14 +- src/test/compile-fail/E0040.rs | 2 +- src/test/compile-fail/E0049.rs | 3 +- src/test/compile-fail/E0053.rs | 14 +- src/test/compile-fail/E0060.rs | 3 +- src/test/compile-fail/E0061.rs | 10 +- src/test/compile-fail/E0063.rs | 42 +- src/test/compile-fail/E0076.rs | 4 +- src/test/compile-fail/E0084.rs | 4 +- src/test/compile-fail/E0087.rs | 2 +- src/test/compile-fail/E0089.rs | 4 +- src/test/compile-fail/E0093.rs | 4 +- src/test/compile-fail/E0132.rs | 2 +- src/test/compile-fail/E0133.rs | 2 +- src/test/compile-fail/E0164.rs | 1 + src/test/compile-fail/E0165.rs | 1 + src/test/compile-fail/E0184.rs | 2 + src/test/compile-fail/E0194.rs | 6 +- src/test/compile-fail/E0195.rs | 1 + src/test/compile-fail/E0207.rs | 2 +- src/test/compile-fail/E0221.rs | 6 +- src/test/compile-fail/E0232.rs | 5 +- src/test/compile-fail/E0259.rs | 6 +- src/test/compile-fail/E0260.rs | 5 +- src/test/compile-fail/E0277.rs | 5 +- src/test/compile-fail/E0297.rs | 4 +- src/test/compile-fail/E0365.rs | 5 +- src/test/compile-fail/E0375.rs | 8 +- src/test/compile-fail/E0389.rs | 1 + src/test/compile-fail/E0392.rs | 1 + src/test/compile-fail/E0393.rs | 5 +- src/test/compile-fail/E0394.rs | 5 +- src/test/compile-fail/E0395.rs | 2 +- src/test/compile-fail/E0396.rs | 1 + src/test/compile-fail/E0403.rs | 2 + src/test/compile-fail/E0407.rs | 4 +- src/test/compile-fail/E0409.rs | 7 +- src/test/compile-fail/E0422.rs | 4 +- src/test/compile-fail/E0423.rs | 16 + src/test/compile-fail/E0424.rs | 25 + src/test/compile-fail/E0425.rs | 18 + src/test/compile-fail/E0426.rs | 17 + src/test/compile-fail/E0428.rs | 19 + src/test/compile-fail/E0429.rs | 15 + src/test/compile-fail/E0430.rs | 15 + src/test/compile-fail/E0431.rs | 14 + src/test/compile-fail/E0432.rs | 14 + src/test/compile-fail/E0433.rs | 13 + src/test/compile-fail/E0434.rs | 19 + src/test/compile-fail/E0435.rs | 15 + src/test/compile-fail/E0437.rs | 19 + src/test/compile-fail/E0438.rs | 21 + src/test/compile-fail/E0439.rs | 18 + src/test/compile-fail/E0440.rs | 22 + src/test/compile-fail/E0441.rs | 21 + src/test/compile-fail/E0442.rs | 29 + src/test/compile-fail/E0443.rs | 23 + src/test/compile-fail/E0444.rs | 21 + src/test/compile-fail/E0445.rs | 25 + 
src/test/compile-fail/E0446.rs | 20 + src/test/compile-fail/E0449.rs | 28 + src/test/compile-fail/E0450.rs | 21 + src/test/compile-fail/E0451.rs | 36 + src/test/compile-fail/E0452.rs | 14 + src/test/compile-fail/E0453.rs | 18 + src/test/compile-fail/E0454.rs | 16 + src/test/compile-fail/E0458.rs | 17 + src/test/compile-fail/E0459.rs | 15 + src/test/compile-fail/E0463.rs | 18 + ...{feature-gate-try-operator.rs => E0478.rs} | 8 +- src/test/compile-fail/E0492.rs | 17 + src/test/compile-fail/E0494.rs | 19 + src/test/compile-fail/E0496.rs | 23 + src/test/compile-fail/E0499.rs | 15 + src/test/compile-fail/E0501.rs | 25 + src/test/compile-fail/E0502.rs | 18 + src/test/compile-fail/E0503.rs | 15 + src/test/compile-fail/E0504.rs | 25 + src/test/compile-fail/E0505.rs | 21 + src/test/compile-fail/E0506.rs | 21 + src/test/compile-fail/E0507.rs | 23 + src/test/compile-fail/E0508.rs | 16 + .../E0509.rs} | 20 +- src/test/compile-fail/E0511.rs | 19 + src/test/compile-fail/E0512.rs | 16 + src/test/compile-fail/E0513.rs | 19 + src/test/compile-fail/E0516.rs | 14 + src/test/compile-fail/E0517.rs | 29 + src/test/compile-fail/E0518.rs | 21 + src/test/compile-fail/E0520.rs | 34 + src/test/compile-fail/E0522.rs | 16 + src/test/compile-fail/E0527.rs | 22 + src/test/compile-fail/E0528.rs | 21 + src/test/compile-fail/E0529.rs | 21 + src/test/compile-fail/E0530.rs | 18 + src/test/compile-fail/E0534.rs | 14 + src/test/compile-fail/E0558.rs | 17 + src/test/compile-fail/E0559.rs | 19 + src/test/compile-fail/E0560.rs | 19 + src/test/compile-fail/E0565-1.rs | 17 + src/test/compile-fail/E0565.rs | 17 + src/test/compile-fail/asm-bad-clobber.rs | 1 + src/test/compile-fail/asm-in-bad-modifier.rs | 2 + src/test/compile-fail/asm-misplaced-option.rs | 1 + src/test/compile-fail/asm-out-assign-imm.rs | 2 + src/test/compile-fail/asm-out-no-modifier.rs | 2 + src/test/compile-fail/asm-out-read-uninit.rs | 2 + .../associated-const-impl-wrong-type.rs | 2 +- .../associated-const-type-parameter-arrays.rs | 3 +- ...ed-types-ICE-when-projecting-out-of-err.rs | 3 +- .../associated-types/issue-36499.rs | 15 + src/test/compile-fail/attr-literals.rs | 33 + src/test/compile-fail/attr-usage-repr.rs | 2 +- .../compile-fail/auxiliary/extern-statics.rs | 14 + .../compile-fail/auxiliary/issue-36708.rs | 15 + .../compile-fail/auxiliary/issue_19452_aux.rs | 13 + src/test/compile-fail/bad-expr-path.rs | 2 +- src/test/compile-fail/bad-expr-path2.rs | 2 +- src/test/compile-fail/bad-module.rs | 8 +- src/test/compile-fail/bad-sized.rs | 2 + .../borrowck/borrowck-box-insensitivity.rs | 2 + .../borrowck-let-suggestion-suffixes.rs | 29 +- .../borrowck/borrowck-let-suggestion.rs | 22 - .../borrowck/borrowck-move-out-of-vec-tail.rs | 2 +- .../borrowck/borrowck-union-borrow-nested.rs | 40 + .../borrowck/borrowck-union-borrow.rs | 93 + .../borrowck/borrowck-union-move-assign.rs | 42 + .../borrowck/borrowck-union-move.rs | 96 + .../borrowck/borrowck-union-uninitialized.rs | 30 + .../borrowck/borrowck-vec-pattern-nesting.rs | 2 +- src/test/compile-fail/cast-rfc0401.rs | 8 +- src/test/compile-fail/coerce-mut.rs | 2 +- .../compile-fail/coherence-impls-sized.rs | 3 + .../compile-fail/conflicting-repr-hints.rs | 30 + src/test/compile-fail/const-deref-ptr.rs | 1 + src/test/compile-fail/const-fn-mismatch.rs | 2 +- .../compile-fail/const-fn-not-in-trait.rs | 8 +- src/test/compile-fail/const-unsized.rs | 4 + src/test/compile-fail/deriving-non-type.rs | 18 +- .../enum-and-module-in-same-scope.rs | 3 +- .../compile-fail/feature-gate-abi-sysv64.rs | 19 + 
.../feature-gate-compiler-builtins.rs | 14 + .../feature-gate-static-in-const.rs | 14 + src/test/compile-fail/fn-variance-1.rs | 4 +- src/test/compile-fail/gated-attr-literals.rs | 44 + .../impl-trait/auto-trait-leak.rs | 4 + .../compile-fail/impl-trait/loan-extend.rs | 6 +- .../compile-fail/impl-wrong-item-for-trait.rs | 6 +- src/test/compile-fail/import-from-missing.rs | 4 +- src/test/compile-fail/import.rs | 7 +- src/test/compile-fail/import2.rs | 4 +- src/test/compile-fail/imports/duplicate.rs | 70 + src/test/compile-fail/imports/reexports.rs | 44 + src/test/compile-fail/imports/unused.rs | 38 + src/test/compile-fail/issue-11004.rs | 39 + src/test/compile-fail/issue-12567.rs | 8 +- src/test/compile-fail/issue-12612.rs | 4 +- src/test/compile-fail/issue-13404.rs | 4 +- src/test/compile-fail/issue-14254.rs | 62 +- src/test/compile-fail/issue-16048.rs | 1 + src/test/compile-fail/issue-1697.rs | 3 +- src/test/compile-fail/issue-17800.rs | 2 +- src/test/compile-fail/issue-18252.rs | 4 +- src/test/compile-fail/issue-18819.rs | 4 +- src/test/compile-fail/issue-19452.rs | 11 +- src/test/compile-fail/issue-19922.rs | 3 +- src/test/compile-fail/issue-20433.rs | 18 + src/test/compile-fail/issue-21146.rs | 2 +- src/test/compile-fail/issue-21546.rs | 6 + src/test/compile-fail/issue-21950.rs | 7 +- src/test/compile-fail/issue-22370.rs | 4 +- src/test/compile-fail/issue-22560.rs | 10 +- src/test/compile-fail/issue-23080-2.rs | 2 +- src/test/compile-fail/issue-23080.rs | 2 +- src/test/compile-fail/issue-23302.rs | 8 + src/test/compile-fail/issue-2356.rs | 22 +- src/test/compile-fail/issue-24204.rs | 27 + src/test/compile-fail/issue-24446.rs | 1 - src/test/compile-fail/issue-25793.rs | 1 + src/test/compile-fail/issue-25826.rs | 1 + src/test/compile-fail/issue-26548.rs | 4 +- src/test/compile-fail/issue-26812.rs | 4 +- src/test/compile-fail/issue-28324.rs | 18 + src/test/compile-fail/issue-28625.rs | 30 + src/test/compile-fail/issue-28776.rs | 2 +- src/test/compile-fail/issue-2937.rs | 3 +- src/test/compile-fail/issue-30007.rs | 2 - src/test/compile-fail/issue-3044.rs | 2 +- src/test/compile-fail/issue-30560.rs | 8 +- src/test/compile-fail/issue-31109.rs | 4 +- src/test/compile-fail/issue-31769.rs | 2 +- src/test/compile-fail/issue-32655.rs | 7 +- src/test/compile-fail/issue-32709.rs | 4 +- src/test/compile-fail/issue-32833.rs | 6 +- src/test/compile-fail/issue-32950.rs | 2 +- src/test/compile-fail/issue-32963.rs | 1 + src/test/compile-fail/issue-33876.rs | 3 - src/test/compile-fail/issue-35668.rs | 24 + src/test/compile-fail/issue-35869.rs | 37 + src/test/compile-fail/issue-36053-2.rs | 21 + src/test/compile-fail/issue-36299.rs | 15 + src/test/compile-fail/issue-36617.rs | 11 + src/test/compile-fail/issue-36638.rs | 19 + src/test/compile-fail/issue-36708.rs | 23 + src/test/compile-fail/issue-4736.rs | 2 +- src/test/compile-fail/issue-4935.rs | 1 - src/test/compile-fail/issue-5035.rs | 3 +- src/test/compile-fail/issue-5067.rs | 62 + src/test/compile-fail/issue-8208.rs | 9 +- src/test/compile-fail/linkage2.rs | 2 +- src/test/compile-fail/linkage3.rs | 2 +- src/test/compile-fail/lint-dead-code-4.rs | 10 +- src/test/compile-fail/lint-forbid-attr.rs | 6 +- .../lint-no-drop-on-repr-extern.rs | 61 - .../loops-reject-duplicate-labels-2.rs | 33 +- .../loops-reject-duplicate-labels.rs | 32 +- ...loops-reject-labels-shadowing-lifetimes.rs | 36 +- .../loops-reject-lifetime-shadowing-label.rs | 3 +- src/test/compile-fail/macro-context.rs | 5 - src/test/compile-fail/macro-error.rs | 2 - 
.../compile-fail/macro-expansion-tests.rs | 46 + src/test/compile-fail/method-call-err-msg.rs | 3 +- .../compile-fail/mir-dataflow/def-inits-1.rs | 1 - src/test/compile-fail/mir-dataflow/inits-1.rs | 1 - .../compile-fail/mir-dataflow/uninits-1.rs | 1 - .../compile-fail/mir-dataflow/uninits-2.rs | 3 +- src/test/compile-fail/missing-block-hint.rs | 20 + src/test/compile-fail/move-out-of-slice-1.rs | 21 + .../compile-fail/mut-pattern-mismatched.rs | 4 +- src/test/compile-fail/not-enough-arguments.rs | 4 +- src/test/compile-fail/numeric-fields.rs | 8 +- .../on-unimplemented/multiple-impls.rs | 3 + .../compile-fail/on-unimplemented/on-impl.rs | 1 + .../compile-fail/on-unimplemented/on-trait.rs | 2 + .../on-unimplemented/slice-index.rs | 2 + src/test/compile-fail/overloaded-calls-bad.rs | 2 - .../paths-in-macro-invocations.rs | 38 + .../compile-fail/platform-intrinsic-params.rs | 16 + .../restricted/tuple-struct-fields/test.rs | 2 +- .../restricted/tuple-struct-fields/test2.rs | 2 +- .../restricted/tuple-struct-fields/test3.rs | 2 +- .../privacy/union-field-privacy-1.rs | 30 + .../privacy/union-field-privacy-2.rs | 28 + src/test/compile-fail/privacy2.rs | 6 +- src/test/compile-fail/privacy3.rs | 3 +- src/test/compile-fail/ptr-coercion.rs | 6 +- ...cr-comp.rs => question-mark-type-infer.rs} | 19 +- .../region-borrow-params-issue-29793-small.rs | 24 +- .../regions-escape-loop-via-vec.rs | 9 + src/test/compile-fail/resolve-hint-macro.rs | 4 +- src/test/compile-fail/resolve-label.rs | 3 + .../compile-fail/resolve_self_super_hint.rs | 12 +- src/test/compile-fail/rfc1592-deprecated.rs | 32 - src/test/compile-fail/rfc1623.rs | 101 + .../compile-fail/safe-extern-statics-mut.rs | 28 + src/test/compile-fail/safe-extern-statics.rs | 32 + src/test/compile-fail/self_type_keyword.rs | 3 +- .../send-is-not-static-ensures-scoping.rs | 2 +- src/test/compile-fail/shadowed-lifetime.rs | 6 +- src/test/compile-fail/slice-mut.rs | 2 +- .../stmt_expr_attrs_no_feature.rs | 16 +- .../struct-fields-hints-no-dupe.rs | 5 +- src/test/compile-fail/struct-fields-hints.rs | 5 +- .../compile-fail/struct-fields-too-many.rs | 4 +- .../compile-fail/suggest-private-fields.rs | 20 +- src/test/compile-fail/super-at-top-level.rs | 3 +- .../compile-fail/syntax-extension-minor.rs | 2 +- .../compile-fail/task-rng-isnt-sendable.rs | 5 +- .../compile-fail/token-error-correct-2.rs | 1 + .../compile-fail/token-error-correct-3.rs | 1 + src/test/compile-fail/token-error-correct.rs | 2 + .../compile-fail/trait-duplicate-methods.rs | 1 + src/test/compile-fail/trait-safety-fn-body.rs | 2 +- .../trait-suggest-where-clause.rs | 7 + ...its-inductive-overflow-auto-normal-auto.rs | 32 - ...its-inductive-overflow-supertrait-oibit.rs | 4 +- .../typeck-auto-trait-no-supertraits-2.rs | 25 + .../typeck-auto-trait-no-supertraits.rs | 49 + .../typeck-auto-trait-no-typeparams.rs | 14 + ...typeck-default-trait-impl-outside-crate.rs | 5 +- .../typeck-default-trait-impl-superregion.rs | 27 - .../typeck-default-trait-impl-supertrait.rs | 29 - ...default-trait-impl-trait-where-clause-2.rs | 36 - ...k-default-trait-impl-trait-where-clause.rs | 36 - .../unboxed-closures-failed-recursive-fn-1.rs | 2 +- .../compile-fail/union/union-const-eval.rs | 26 + .../compile-fail/union/union-const-pat.rs | 25 + src/test/compile-fail/union/union-copy.rs | 26 + .../compile-fail/union/union-derive-clone.rs | 41 + .../compile-fail/union/union-derive-eq.rs | 30 + src/test/compile-fail/union/union-derive.rs | 28 + src/test/compile-fail/union/union-empty.rs | 15 + 
.../compile-fail/union/union-feature-gate.rs | 15 + src/test/compile-fail/union/union-fields.rs | 37 + src/test/compile-fail/union/union-generic.rs | 24 + .../union/union-lint-dead-code.rs | 26 + .../union/union-nonrepresentable.rs | 18 + src/test/compile-fail/union/union-repr-c.rs | 29 + .../compile-fail/union/union-suggest-field.rs | 30 + src/test/compile-fail/union/union-unsafe.rs | 24 + src/test/compile-fail/union/union-unsized.rs | 23 + .../union/union-with-drop-fields-lint.rs | 40 + src/test/compile-fail/unresolved-import.rs | 32 +- src/test/compile-fail/unsafe-const-fn.rs | 2 +- src/test/compile-fail/unsized6.rs | 4 +- src/test/compile-fail/use-from-trait-xc.rs | 4 +- src/test/compile-fail/use-from-trait.rs | 6 +- src/test/compile-fail/use-keyword.rs | 11 +- src/test/compile-fail/use-mod-2.rs | 6 +- .../compile-fail/user-defined-macro-rules.rs | 11 + src/test/compile-fail/variadic-ffi-3.rs | 8 +- .../compile-fail/variance-associated-types.rs | 4 +- .../compile-fail/variance-object-types.rs | 2 +- .../compile-fail/variance-region-bounds.rs | 4 +- .../compile-fail/variance-regions-direct.rs | 14 +- .../compile-fail/variance-regions-indirect.rs | 10 +- .../compile-fail/variance-trait-bounds.rs | 18 +- .../variance-trait-object-bound.rs | 2 +- .../compile-fail/variance-types-bounds.rs | 20 +- src/test/compile-fail/variance-types.rs | 12 +- .../debuginfo/auxiliary/macro-stepping.rs | 20 + .../debuginfo/lexical-scope-with-macro.rs | 2 +- src/test/debuginfo/macro-stepping.rs | 103 + src/test/debuginfo/simd.rs | 5 + src/test/debuginfo/union-smoke.rs | 50 + src/test/incremental/cache_file_headers.rs | 29 + src/test/incremental/crate_hash_reorder.rs | 39 + src/test/incremental/hashes/struct_defs.rs | 238 ++ src/test/incremental/issue-35593.rs | 21 + src/test/incremental/krate-inherent.rs | 34 + src/test/incremental/krate-inlined.rs | 31 + .../auxiliary/a.rs | 18 + .../remove-private-item-cross-crate/main.rs | 28 + src/test/incremental/source_loc_macros.rs | 63 + .../span_hash_stable/auxiliary/mod.rs | 17 + .../span_hash_stable/auxiliary/sub1.rs | 15 + .../span_hash_stable/auxiliary/sub2.rs | 15 + src/test/incremental/span_hash_stable/main.rs | 34 + .../spans_insignificant_w_o_debuginfo.rs | 25 + .../spans_significant_w_debuginfo.rs | 25 + src/test/incremental/string_constant.rs | 10 +- .../incremental/struct_change_field_name.rs | 2 +- src/test/mir-opt/storage_ranges.rs | 44 +- src/test/parse-fail/doc-after-struct-field.rs | 20 + .../parse-fail/doc-before-extern-rbrace.rs | 2 +- src/test/parse-fail/doc-before-fn-rbrace.rs | 16 + .../doc-before-identifier.rs} | 14 +- src/test/parse-fail/doc-before-mod-rbrace.rs | 15 + src/test/parse-fail/doc-before-rbrace.rs | 3 +- src/test/parse-fail/doc-before-semi.rs | 3 +- .../parse-fail/doc-before-struct-rbrace-1.rs | 21 + .../parse-fail/doc-before-struct-rbrace-2.rs | 20 + src/test/parse-fail/extern-no-fn.rs | 2 +- src/test/parse-fail/issue-21153.rs | 3 +- src/test/parse-fail/obsolete-closure-kind.rs | 18 - src/test/parse-fail/suffixed-literal-meta.rs | 25 + .../attr-literals.rs} | 16 +- src/test/run-fail-fulldeps/qquote.rs | 6 +- src/test/run-fail/bounds-check-no-overflow.rs | 2 +- src/test/run-fail/issue-30380.rs | 3 - src/test/run-fail/mir_drop_panics.rs | 2 - src/test/run-fail/mir_dynamic_drops_1.rs | 2 - src/test/run-fail/mir_dynamic_drops_2.rs | 3 +- src/test/run-fail/mir_dynamic_drops_3.rs | 3 +- src/test/run-fail/mir_indexing_oob_1.rs | 2 - src/test/run-fail/mir_indexing_oob_2.rs | 2 - src/test/run-fail/mir_indexing_oob_3.rs | 2 - 
.../mir_trans_calls_converging_drops.rs | 3 - .../mir_trans_calls_converging_drops_2.rs | 3 - .../run-fail/mir_trans_calls_diverging.rs | 3 +- .../mir_trans_calls_diverging_drops.rs | 3 - .../run-fail/mir_trans_no_landing_pads.rs | 3 +- .../mir_trans_no_landing_pads_diverging.rs | 3 +- ...owing-pow.rs => overflowing-pow-signed.rs} | 0 src/test/run-fail/overflowing-pow-unsigned.rs | 16 + src/test/run-make/atomic-lock-free/Makefile | 12 + src/test/run-make/link-arg/Makefile | 5 + src/test/run-make/link-arg/empty.rs | 11 + src/test/run-make/no-builtins-lto/Makefile | 9 + src/test/run-make/no-builtins-lto/main.rs | 13 + .../run-make/no-builtins-lto/no_builtins.rs | 12 + src/test/run-make/no-duplicate-libs/bar.rs | 3 +- src/test/run-make/no-duplicate-libs/foo.rs | 3 +- src/test/run-make/save-analysis/Makefile | 1 + src/test/run-make/save-analysis/foo.rs | 8 +- src/test/run-make/sepcomp-inlining/Makefile | 13 +- .../auxiliary/cond_noprelude_plugin.rs | 65 + .../auxiliary/cond_plugin.rs | 66 + .../auxiliary/cond_prelude_plugin.rs | 60 + .../auxiliary/custom_derive_partial_eq.rs | 81 + .../auxiliary/custom_derive_plugin.rs | 2 +- .../auxiliary/custom_derive_plugin_attr.rs | 7 +- .../auxiliary/dummy_mir_pass.rs | 4 +- .../auxiliary/macro_crate_test.rs | 107 +- .../auxiliary/plugin_args.rs | 4 +- .../auxiliary/proc_macro_def.rs | 56 + .../custom-derive-partial-eq.rs | 22 + .../run-pass-fulldeps/deprecated-derive.rs | 1 + .../empty-struct-braces-derive.rs | 24 +- src/test/run-pass-fulldeps/issue-11881.rs | 12 +- .../run-pass-fulldeps/lint-group-plugin.rs | 1 + .../lint-plugin-cmdline-load.rs | 2 + src/test/run-pass-fulldeps/lint-plugin.rs | 1 + .../macro-crate-multi-decorator-literals.rs | 58 + src/test/run-pass-fulldeps/macro-quote-1.rs | 28 + .../run-pass-fulldeps/macro-quote-cond.rs | 54 + .../macro-quote-noprelude.rs | 54 + .../run-pass-fulldeps/macro-quote-prelude.rs | 54 + src/test/run-pass-fulldeps/mir-pass.rs | 3 +- src/test/run-pass-fulldeps/proc_macro.rs | 48 + src/test/run-pass-fulldeps/qquote.rs | 4 +- .../run-pass-fulldeps/rustc-macro/add-impl.rs | 25 + .../rustc-macro/auxiliary/add-impl.rs | 33 + .../rustc-macro/auxiliary/derive-a.rs | 27 + .../rustc-macro/auxiliary/derive-atob.rs | 26 + .../rustc-macro/auxiliary/derive-ctod.rs | 26 + .../auxiliary/derive-same-struct.rs | 32 + .../auxiliary/expand-with-a-macro.rs | 36 + .../rustc-macro/derive-same-struct.rs | 23 + .../rustc-macro/expand-with-a-macro.rs | 30 + .../run-pass-fulldeps/rustc-macro/load-two.rs | 30 + .../run-pass-fulldeps/rustc-macro/smoke.rs | 29 + .../run-pass-valgrind/cast-enum-with-dtor.rs | 5 +- src/test/run-pass/abi-sysv64-arg-passing.rs | 341 ++ .../run-pass/abi-sysv64-register-usage.rs | 106 + src/test/run-pass/assert-ne-macro-success.rs | 22 + src/test/run-pass/assert-ne-macro-unsized.rs | 13 + ...ociated-types-normalize-unifield-struct.rs | 3 - src/test/run-pass/atomic-access-bool.rs | 31 + src/test/run-pass/attr-before-view-item.rs | 2 - src/test/run-pass/attr-before-view-item2.rs | 2 - src/test/run-pass/auxiliary/issue-10028.rs | 3 - .../run-pass/auxiliary/issue-17718-aux.rs | 3 +- src/test/run-pass/auxiliary/issue13507.rs | 4 +- .../auxiliary/typeid-intrinsic-aux1.rs | 5 + .../auxiliary/typeid-intrinsic-aux2.rs | 5 + .../builtin-superkinds-in-metadata.rs | 2 - .../check-static-recursion-foreign.rs | 2 +- src/test/run-pass/coherence-impl-in-fn.rs | 2 - src/test/run-pass/conditional-compile-arch.rs | 3 + src/test/run-pass/conditional-compile.rs | 3 + src/test/run-pass/const-negation.rs | 2 - 
src/test/run-pass/cstring-drop.rs | 49 + src/test/run-pass/deriving-bounds.rs | 2 - .../deriving-meta-empty-trait-list.rs | 1 + src/test/run-pass/drop-flag-sanity-check.rs | 68 - .../run-pass/drop-flag-skip-sanity-check.rs | 67 - src/test/run-pass/dynamic-drop.rs | 29 +- src/test/run-pass/enum-discrim-autosizing.rs | 2 - src/test/run-pass/enum-size-variance.rs | 8 +- .../run-pass/exhaustive-bool-match-sanity.rs | 3 - src/test/run-pass/fds-are-cloexec.rs | 3 +- src/test/run-pass/ifmt.rs | 1 - src/test/run-pass/impl-trait/example-st.rs | 2 +- src/test/run-pass/imports.rs | 78 + src/test/run-pass/intrinsic-move-val.rs | 9 - src/test/run-pass/issue-10734.rs | 4 - .../issue-12033.rs} | 12 +- src/test/run-pass/issue-14875.rs | 43 + src/test/run-pass/issue-16648.rs | 3 +- src/test/run-pass/issue-17121.rs | 2 - src/test/run-pass/issue-19100.rs | 4 + src/test/run-pass/issue-19404.rs | 5 +- src/test/run-pass/issue-20797.rs | 4 - src/test/run-pass/issue-21400.rs | 2 - src/test/run-pass/issue-2288.rs | 2 - src/test/run-pass/issue-25757.rs | 2 +- src/test/run-pass/issue-2611-3.rs | 1 - src/test/run-pass/issue-26251.rs | 22 + src/test/run-pass/issue-27320.rs | 3 + src/test/run-pass/issue-2895.rs | 4 +- src/test/run-pass/issue-28950.rs | 10 +- src/test/run-pass/issue-32805.rs | 7 - src/test/run-pass/issue-33387.rs | 3 - src/test/run-pass/issue-33687.rs | 26 + src/test/run-pass/issue-33992.rs | 40 + src/test/run-pass/issue-34053.rs | 23 + src/test/run-pass/issue-35423.rs | 18 + src/test/run-pass/issue-35815.rs | 23 + src/test/run-pass/issue-36023.rs | 2 + src/test/run-pass/issue-36053.rs | 32 + src/test/run-pass/issue-36075.rs | 22 + .../issue-36139-normalize-closure-sig.rs | 28 + .../run-pass/issue-36278-prefix-nesting.rs | 28 + src/test/run-pass/issue-36936.rs | 2 +- src/test/run-pass/issue-37020.rs | 25 + src/test/run-pass/issue-37109.rs | 25 + src/test/run-pass/issue-37222.rs | 25 + .../run-pass/issue-37291/auxiliary/lib.rs | 52 + src/test/run-pass/issue-37291/main.rs | 29 + src/test/run-pass/issue-7784.rs | 2 - src/test/run-pass/issue-8460.rs | 2 +- src/test/run-pass/issue-9837.rs | 20 + src/test/run-pass/issue36260.rs | 22 + src/test/run-pass/iter-sum-overflow-debug.rs | 35 + src/test/run-pass/iter-sum-overflow-ndebug.rs | 23 + .../liveness-assign-imm-local-after-ret.rs | 4 +- src/test/run-pass/match-vec-alternatives.rs | 6 - src/test/run-pass/mir_adt_construction.rs | 9 +- src/test/run-pass/mir_ascription_coercion.rs | 3 +- .../run-pass/mir_augmented_assignments.rs | 16 - src/test/run-pass/mir_autoderef.rs | 4 - src/test/run-pass/mir_boxing.rs | 3 +- .../run-pass/mir_build_match_comparisons.rs | 6 - .../run-pass/mir_call_with_associated_type.rs | 4 - src/test/run-pass/mir_cast_fn_ret.rs | 4 - src/test/run-pass/mir_coercion_casts.rs | 3 - src/test/run-pass/mir_coercions.rs | 9 +- src/test/run-pass/mir_constval_adts.rs | 3 - src/test/run-pass/mir_fat_ptr.rs | 9 - src/test/run-pass/mir_fat_ptr_drop.rs | 1 - src/test/run-pass/mir_match_arm_guard.rs | 3 - src/test/run-pass/mir_misc_casts.rs | 18 +- src/test/run-pass/mir_overflow_off.rs | 2 +- src/test/run-pass/mir_raw_fat_ptr.rs | 6 - src/test/run-pass/mir_refs_correct.rs | 28 +- src/test/run-pass/mir_small_agg_arg.rs | 3 - src/test/run-pass/mir_struct_with_assoc_ty.rs | 3 - src/test/run-pass/mir_temp_promotions.rs | 3 - src/test/run-pass/mir_trans_array.rs | 2 - src/test/run-pass/mir_trans_array_2.rs | 2 - .../run-pass/mir_trans_call_converging.rs | 2 - src/test/run-pass/mir_trans_calls.rs | 24 +- src/test/run-pass/mir_trans_calls_variadic.rs | 3 - 
src/test/run-pass/mir_trans_critical_edge.rs | 1 - src/test/run-pass/mir_trans_spike1.rs | 3 - src/test/run-pass/mir_trans_switch.rs | 4 - src/test/run-pass/mir_trans_switchint.rs | 3 - src/test/run-pass/mir_void_return.rs | 3 - src/test/run-pass/mir_void_return_2.rs | 3 - ...drop-flag-size.rs => no-drop-flag-size.rs} | 5 +- src/test/run-pass/rfc1623.rs | 82 + src/test/run-pass/signal-exit-status.rs | 2 +- .../run-pass/simd-intrinsic-generic-cast.rs | 3 +- .../sync-send-iterators-in-libcollections.rs | 2 - src/test/run-pass/try-operator-custom.rs | 73 + src/test/run-pass/try-operator-hygiene.rs | 2 - src/test/run-pass/try-operator.rs | 2 - src/test/run-pass/type-macros-hlist.rs | 2 - src/test/run-pass/type-macros-simple.rs | 21 +- src/test/run-pass/typeid-intrinsic.rs | 13 + src/test/run-pass/union/auxiliary/union.rs | 16 + src/test/run-pass/union/union-backcomp.rs | 27 + src/test/run-pass/union/union-basic.rs | 71 + src/test/run-pass/union/union-c-interop.rs | 44 + src/test/run-pass/union/union-const-trans.rs | 27 + src/test/run-pass/union/union-derive.rs | 47 + src/test/run-pass/union/union-drop-assign.rs | 44 + src/test/run-pass/union/union-drop.rs | 65 + src/test/run-pass/union/union-generic.rs | 43 + .../run-pass/union/union-inherent-method.rs | 24 + src/test/run-pass/union/union-macro.rs | 33 + src/test/run-pass/union/union-overwrite.rs | 80 + src/test/run-pass/union/union-packed.rs | 104 + .../run-pass/union/union-pat-refutability.rs | 62 + .../union-trait-impl.rs} | 18 +- src/test/run-pass/union/union-transmute.rs | 40 + .../union/union-with-drop-fields-lint.rs | 42 + src/test/run-pass/unreachable-code-1.rs | 4 +- src/test/run-pass/vec-matching-fold.rs | 3 - .../vec-matching-legal-tail-element-borrow.rs | 3 +- src/test/run-pass/vec-matching.rs | 7 - src/test/run-pass/vec-tail-matching.rs | 2 - .../run-pass/zero-size-type-destructors.rs | 6 - .../run-pass/zero_sized_subslice_match.rs | 2 - src/test/rustdoc/assoc-types.rs | 2 + src/test/rustdoc/auxiliary/issue-36031.rs | 21 + .../rustdoc/auxiliary/rustdoc-default-impl.rs | 2 +- src/test/rustdoc/issue-19190.rs | 3 + src/test/rustdoc/issue-21092.rs | 1 + src/test/rustdoc/issue-25001.rs | 3 + src/test/rustdoc/issue-26606.rs | 2 +- src/test/rustdoc/issue-32374.rs | 2 +- src/test/rustdoc/issue-35169-2.rs | 45 + src/test/rustdoc/issue-35169.rs | 40 + src/test/rustdoc/issue-36031.rs | 19 + src/test/rustdoc/macros.rs | 4 +- src/test/rustdoc/src-links.rs | 2 + src/test/rustdoc/structfields.rs | 16 + src/test/rustdoc/titles.rs | 59 + src/test/rustdoc/union.rs | 20 + .../codemap_tests}/bad-format-args.rs | 7 - .../ui/codemap_tests/bad-format-args.stderr | 26 + src/test/ui/codemap_tests/empty_span.rs | 1 - src/test/ui/codemap_tests/empty_span.stderr | 4 +- .../codemap_tests/huge_multispan_highlight.rs | 7 +- .../huge_multispan_highlight.stderr | 2 +- src/test/ui/codemap_tests/issue-11715.rs | 5 +- .../codemap_tests}/issue-28308.rs | 4 - src/test/ui/codemap_tests/issue-28308.stderr | 10 + src/test/ui/codemap_tests/one_line.rs | 2 - src/test/ui/codemap_tests/one_line.stderr | 4 +- .../ui/codemap_tests/overlapping_spans.rs | 1 - .../ui/codemap_tests/overlapping_spans.stderr | 4 +- .../codemap_tests/repair_span_std_macros.rs | 13 + .../repair_span_std_macros.stderr | 11 + src/test/ui/codemap_tests/tab.rs | 3 +- src/test/ui/codemap_tests/tab.stderr | 2 +- src/test/ui/codemap_tests/two_files.rs | 1 - src/test/ui/codemap_tests/two_files.stderr | 4 +- src/test/ui/codemap_tests/two_files_data.rs | 3 +- src/test/ui/codemap_tests/unicode.rs | 1 - 
src/test/ui/codemap_tests/unicode.stderr | 6 +- .../auxiliary/extern_macro_crate.rs | 23 + .../cross-crate-macro-backtrace/main.rs} | 6 +- .../cross-crate-macro-backtrace/main.stderr | 10 + .../ui/lifetimes/borrowck-let-suggestion.rs | 17 + .../lifetimes/borrowck-let-suggestion.stderr | 14 + src/test/ui/macros/bad_hello.rs | 13 + src/test/ui/macros/bad_hello.stderr | 8 + .../macro-backtrace-invalid-internals.rs | 24 +- .../macro-backtrace-invalid-internals.stderr | 56 + .../macros}/macro-backtrace-nested.rs | 11 +- .../ui/macros/macro-backtrace-nested.stderr | 20 + .../macros}/macro-backtrace-println.rs | 8 +- .../ui/macros/macro-backtrace-println.stderr | 11 + .../mismatched_types/const-fn-in-trait.rs} | 18 +- .../mismatched_types/const-fn-in-trait.stderr | 14 + src/test/ui/mismatched_types/issue-26480.rs | 1 - .../ui/mismatched_types/issue-26480.stderr | 16 +- src/test/ui/mismatched_types/issue-35030.rs | 25 + .../ui/mismatched_types/issue-35030.stderr | 11 + src/test/ui/mismatched_types/main.rs | 2 - src/test/ui/mismatched_types/main.stderr | 4 +- .../trait-impl-fn-incompatibility.rs | 27 + .../trait-impl-fn-incompatibility.stderr | 23 + src/test/ui/span/E0493.rs | 30 + src/test/ui/span/E0493.stderr | 11 + src/test/ui/span/E0535.rs | 14 + src/test/ui/span/E0535.stderr | 8 + src/test/ui/span/E0536.rs | 14 + src/test/ui/span/E0536.stderr | 8 + src/test/ui/span/E0537.rs | 14 + src/test/ui/span/E0537.stderr | 8 + .../{compile-fail => ui/span}/issue-11925.rs | 2 +- src/test/ui/span/issue-11925.stderr | 14 + src/test/ui/span/issue-36530.rs | 14 + src/test/ui/span/issue-36530.stderr | 18 + .../span/move-closure.rs} | 5 +- src/test/ui/span/move-closure.stderr | 11 + .../span/type-binding.rs} | 8 +- src/test/ui/span/type-binding.stderr | 8 + .../span}/typo-suggestion.rs | 2 - src/test/ui/span/typo-suggestion.stderr | 14 + src/tools/cargotest/main.rs | 2 +- src/tools/compiletest/Cargo.lock | 10 + src/tools/compiletest/src/common.rs | 3 + src/tools/compiletest/src/header.rs | 72 +- src/tools/compiletest/src/main.rs | 3 +- src/tools/compiletest/src/runtest.rs | 64 +- src/tools/compiletest/src/util.rs | 3 +- src/tools/error_index_generator/main.rs | 1 - src/tools/linkchecker/Cargo.lock | 6 + src/tools/linkchecker/main.rs | 7 +- src/tools/rustbook/build.rs | 8 +- src/tools/rustbook/main.rs | 1 - src/tools/tidy/src/bins.rs | 30 +- src/tools/tidy/src/cargo.rs | 6 + version | 1 + 1585 files changed, 49904 insertions(+), 47463 deletions(-) create mode 100644 mk/cfg/i686-unknown-haiku.mk create mode 100644 mk/cfg/mips-unknown-linux-uclibc.mk create mode 100644 mk/cfg/mips64-unknown-linux-gnuabi64.mk create mode 100644 mk/cfg/mips64el-unknown-linux-gnuabi64.mk create mode 100644 mk/cfg/mipsel-unknown-linux-uclibc.mk create mode 100644 mk/cfg/s390x-unknown-linux-gnu.mk create mode 100644 mk/cfg/x86_64-unknown-haiku.mk delete mode 100644 src/doc/style/README.md delete mode 100644 src/doc/style/SUMMARY.md delete mode 100644 src/doc/style/errors/README.md delete mode 100644 src/doc/style/errors/ergonomics.md delete mode 100644 src/doc/style/errors/handling.md delete mode 100644 src/doc/style/errors/propagation.md delete mode 100644 src/doc/style/errors/signaling.md delete mode 100644 src/doc/style/features/README.md delete mode 100644 src/doc/style/features/crates.md delete mode 100644 src/doc/style/features/functions-and-methods/README.md delete mode 100644 src/doc/style/features/functions-and-methods/convenience.md delete mode 100644 src/doc/style/features/functions-and-methods/input.md delete mode 100644 
src/doc/style/features/functions-and-methods/output.md delete mode 100644 src/doc/style/features/let.md delete mode 100644 src/doc/style/features/loops.md delete mode 100644 src/doc/style/features/match.md delete mode 100644 src/doc/style/features/modules.md delete mode 100644 src/doc/style/features/traits/README.md delete mode 100644 src/doc/style/features/traits/common.md delete mode 100644 src/doc/style/features/traits/extensions.md delete mode 100644 src/doc/style/features/traits/generics.md delete mode 100644 src/doc/style/features/traits/objects.md delete mode 100644 src/doc/style/features/traits/overloading.md delete mode 100644 src/doc/style/features/traits/reuse.md delete mode 100644 src/doc/style/features/types/README.md delete mode 100644 src/doc/style/features/types/conversions.md delete mode 100644 src/doc/style/features/types/newtype.md delete mode 100644 src/doc/style/ownership/README.md delete mode 100644 src/doc/style/ownership/builders.md delete mode 100644 src/doc/style/ownership/cell-smart.md delete mode 100644 src/doc/style/ownership/constructors.md delete mode 100644 src/doc/style/ownership/destructors.md delete mode 100644 src/doc/style/ownership/raii.md delete mode 100644 src/doc/style/platform.md delete mode 100644 src/doc/style/safety/README.md delete mode 100644 src/doc/style/safety/lib-guarantees.md delete mode 100644 src/doc/style/safety/unsafe.md delete mode 100644 src/doc/style/style/README.md delete mode 100644 src/doc/style/style/braces.md delete mode 100644 src/doc/style/style/comments.md delete mode 100644 src/doc/style/style/features.md delete mode 100644 src/doc/style/style/imports.md delete mode 100644 src/doc/style/style/naming/README.md delete mode 100644 src/doc/style/style/naming/containers.md delete mode 100644 src/doc/style/style/naming/conversions.md delete mode 100644 src/doc/style/style/naming/iterators.md delete mode 100644 src/doc/style/style/naming/ownership.md delete mode 100644 src/doc/style/style/optional.md delete mode 100644 src/doc/style/style/organization.md delete mode 100644 src/doc/style/style/whitespace.md delete mode 100644 src/doc/style/testing/README.md delete mode 100644 src/doc/style/testing/unit.md delete mode 100644 src/doc/style/todo.md create mode 100644 src/libcompiler_builtins/Cargo.toml create mode 100644 src/libcompiler_builtins/build.rs create mode 100644 src/libcompiler_builtins/lib.rs create mode 100644 src/liblibc/Cargo.lock create mode 100644 src/liblibc/ci/docker/i686-unknown-linux-musl/Dockerfile create mode 100644 src/liblibc/ci/docker/mips-unknown-linux-musl/Dockerfile create mode 100644 src/liblibc/ci/docker/mipsel-unknown-linux-musl/Dockerfile create mode 100644 src/liblibc/ci/docker/powerpc-unknown-linux-gnu/Dockerfile create mode 100644 src/liblibc/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile delete mode 100644 src/liblibc/libc-test/Cargo.lock delete mode 100644 src/liblibc/libc-test/generate-files/Cargo.lock rename src/liblibc/src/unix/bsd/{openbsdlike => netbsdlike}/mod.rs (96%) rename src/liblibc/src/unix/bsd/{openbsdlike/netbsd.rs => netbsdlike/netbsd/mod.rs} (92%) create mode 100644 src/liblibc/src/unix/bsd/netbsdlike/netbsd/other/b32/mod.rs create mode 100644 src/liblibc/src/unix/bsd/netbsdlike/netbsd/other/b64/mod.rs create mode 100644 src/liblibc/src/unix/bsd/netbsdlike/netbsd/other/mod.rs create mode 100644 src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/bitrig.rs rename src/liblibc/src/unix/bsd/{openbsdlike/openbsd.rs => netbsdlike/openbsdlike/mod.rs} (81%) create mode 100644 
src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/openbsd.rs delete mode 100644 src/liblibc/src/unix/bsd/openbsdlike/bitrig.rs create mode 100644 src/liblibc/src/unix/haiku/b32.rs create mode 100644 src/liblibc/src/unix/haiku/b64.rs create mode 100644 src/liblibc/src/unix/haiku/mod.rs create mode 100644 src/liblibc/src/unix/notbsd/linux/mips64.rs create mode 100644 src/liblibc/src/unix/notbsd/linux/s390x.rs create mode 100644 src/libproc_macro/Cargo.toml create mode 100644 src/libproc_macro/build.rs create mode 100644 src/libproc_macro/lib.rs create mode 100644 src/libproc_macro/parse.rs create mode 100644 src/libproc_macro/prelude.rs create mode 100644 src/libproc_macro/qquote.rs delete mode 100644 src/librbml/lib.rs create mode 100644 src/librustc/dep_graph/shadow.rs delete mode 100644 src/librustc/hir/fold.rs create mode 100644 src/librustc_back/target/haiku_base.rs create mode 100644 src/librustc_back/target/i686_unknown_haiku.rs create mode 100644 src/librustc_back/target/mips64_unknown_linux_gnuabi64.rs create mode 100644 src/librustc_back/target/mips64el_unknown_linux_gnuabi64.rs create mode 100644 src/librustc_back/target/mips_unknown_linux_uclibc.rs create mode 100644 src/librustc_back/target/mipsel_unknown_linux_uclibc.rs create mode 100644 src/librustc_back/target/s390x_unknown_linux_gnu.rs create mode 100644 src/librustc_back/target/x86_64_unknown_haiku.rs rename src/{librustdoc => librustc_data_structures}/flock.rs (57%) create mode 100644 src/librustc_driver/derive_registrar.rs create mode 100644 src/librustc_errors/lock.rs create mode 100644 src/librustc_incremental/calculate_svh/caching_codemap_view.rs create mode 100644 src/librustc_incremental/calculate_svh/def_path_hash.rs create mode 100644 src/librustc_incremental/persist/file_format.rs create mode 100644 src/librustc_incremental/persist/fs.rs delete mode 100644 src/librustc_incremental/persist/util.rs rename src/{librbml => librustc_macro}/Cargo.toml (57%) create mode 100644 src/librustc_macro/lib.rs delete mode 100644 src/librustc_metadata/common.rs delete mode 100644 src/librustc_metadata/def_key.rs create mode 100644 src/librustc_metadata/index_builder.rs delete mode 100644 src/librustc_metadata/macros.rs create mode 100644 src/librustc_metadata/schema.rs delete mode 100644 src/librustc_metadata/tls_context.rs delete mode 100644 src/librustc_metadata/tydecode.rs delete mode 100644 src/librustc_metadata/tyencode.rs create mode 100644 src/librustc_mir/def_use.rs create mode 100644 src/librustc_mir/transform/copy_prop.rs create mode 100644 src/librustc_mir/transform/instcombine.rs delete mode 100644 src/librustc_resolve/assign_ids.rs create mode 100644 src/librustc_resolve/macros.rs create mode 100644 src/librustc_save_analysis/json_api_dumper.rs delete mode 100644 src/librustc_trans/_match.rs rename src/{librustc_back => librustc_trans/back}/rpath.rs (98%) create mode 100644 src/librustc_trans/cabi_mips64.rs create mode 100644 src/librustc_trans/cabi_s390x.rs delete mode 100644 src/librustc_trans/controlflow.rs delete mode 100644 src/librustc_trans/datum.rs delete mode 100644 src/librustc_trans/expr.rs delete mode 100644 src/librustc_trans/inline.rs delete mode 100644 src/librustdoc/html/static/jquery-2.1.4.min.js delete mode 100644 src/librustdoc/passes.rs create mode 100644 src/librustdoc/passes/collapse_docs.rs create mode 100644 src/librustdoc/passes/mod.rs create mode 100644 src/librustdoc/passes/strip_hidden.rs create mode 100644 src/librustdoc/passes/strip_priv_imports.rs create mode 100644 
src/librustdoc/passes/strip_private.rs create mode 100644 src/librustdoc/passes/unindent_comments.rs rename src/{librbml => libserialize}/leb128.rs (100%) rename src/{librbml => libserialize}/opaque.rs (62%) create mode 100644 src/libstd/os/haiku/fs.rs rename src/{test/run-pass/single-derive-attr-with-gate.rs => libstd/os/haiku/mod.rs} (78%) create mode 100644 src/libstd/os/haiku/raw.rs create mode 100644 src/libsyntax/ext/placeholders.rs create mode 100644 src/libsyntax/ext/proc_macro_shim.rs create mode 100644 src/libsyntax_ext/deriving/custom.rs create mode 100644 src/libsyntax_ext/rustc_macro_registrar.rs create mode 100644 src/rust-installer/test/image-docdir1/share/doc/rust/README create mode 100644 src/rust-installer/test/image-docdir1/share/doc/rust/rustdocs.txt create mode 100644 src/rust-installer/test/image-docdir2/share/doc/cargo/README create mode 100644 src/rust-installer/test/image-docdir2/share/doc/cargo/cargodocs.txt create mode 100644 src/test/codegen/abi-sysv64.rs create mode 100644 src/test/codegen/likely.rs delete mode 100644 src/test/compile-fail-fulldeps/auxiliary/macro_crate_MacroRulesTT.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/append-impl.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/at-the-root.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/attribute.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/auxiliary/append-impl.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/auxiliary/derive-a-2.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/auxiliary/derive-a.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/auxiliary/derive-bad.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/auxiliary/derive-panic.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/auxiliary/derive-unstable-2.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/auxiliary/derive-unstable.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/cannot-link.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/define-two.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/derive-bad.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/derive-still-gated.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/expand-to-unstable-2.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/expand-to-unstable.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/export-macro.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/exports.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/feature-gate-1.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/feature-gate-2.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/feature-gate-3.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/feature-gate-4.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/feature-gate-5.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/import.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/load-panic.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/require-rustc-macro-crate-type.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/shadow-builtin.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/shadow.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/signature.rs create mode 100644 
src/test/compile-fail-fulldeps/rustc-macro/two-crate-types-1.rs create mode 100644 src/test/compile-fail-fulldeps/rustc-macro/two-crate-types-2.rs create mode 100644 src/test/compile-fail/E0423.rs create mode 100644 src/test/compile-fail/E0424.rs create mode 100644 src/test/compile-fail/E0425.rs create mode 100644 src/test/compile-fail/E0426.rs create mode 100644 src/test/compile-fail/E0428.rs create mode 100644 src/test/compile-fail/E0429.rs create mode 100644 src/test/compile-fail/E0430.rs create mode 100644 src/test/compile-fail/E0431.rs create mode 100644 src/test/compile-fail/E0432.rs create mode 100644 src/test/compile-fail/E0433.rs create mode 100644 src/test/compile-fail/E0434.rs create mode 100644 src/test/compile-fail/E0435.rs create mode 100644 src/test/compile-fail/E0437.rs create mode 100644 src/test/compile-fail/E0438.rs create mode 100644 src/test/compile-fail/E0439.rs create mode 100644 src/test/compile-fail/E0440.rs create mode 100644 src/test/compile-fail/E0441.rs create mode 100644 src/test/compile-fail/E0442.rs create mode 100644 src/test/compile-fail/E0443.rs create mode 100644 src/test/compile-fail/E0444.rs create mode 100644 src/test/compile-fail/E0445.rs create mode 100644 src/test/compile-fail/E0446.rs create mode 100644 src/test/compile-fail/E0449.rs create mode 100644 src/test/compile-fail/E0450.rs create mode 100644 src/test/compile-fail/E0451.rs create mode 100644 src/test/compile-fail/E0452.rs create mode 100644 src/test/compile-fail/E0453.rs create mode 100644 src/test/compile-fail/E0454.rs create mode 100644 src/test/compile-fail/E0458.rs create mode 100644 src/test/compile-fail/E0459.rs create mode 100644 src/test/compile-fail/E0463.rs rename src/test/compile-fail/{feature-gate-try-operator.rs => E0478.rs} (72%) create mode 100644 src/test/compile-fail/E0492.rs create mode 100644 src/test/compile-fail/E0494.rs create mode 100644 src/test/compile-fail/E0496.rs create mode 100644 src/test/compile-fail/E0499.rs create mode 100644 src/test/compile-fail/E0501.rs create mode 100644 src/test/compile-fail/E0502.rs create mode 100644 src/test/compile-fail/E0503.rs create mode 100644 src/test/compile-fail/E0504.rs create mode 100644 src/test/compile-fail/E0505.rs create mode 100644 src/test/compile-fail/E0506.rs create mode 100644 src/test/compile-fail/E0507.rs create mode 100644 src/test/compile-fail/E0508.rs rename src/test/{run-pass/mir_cross_crate.rs => compile-fail/E0509.rs} (65%) create mode 100644 src/test/compile-fail/E0511.rs create mode 100644 src/test/compile-fail/E0512.rs create mode 100644 src/test/compile-fail/E0513.rs create mode 100644 src/test/compile-fail/E0516.rs create mode 100644 src/test/compile-fail/E0517.rs create mode 100644 src/test/compile-fail/E0518.rs create mode 100644 src/test/compile-fail/E0520.rs create mode 100644 src/test/compile-fail/E0522.rs create mode 100644 src/test/compile-fail/E0527.rs create mode 100644 src/test/compile-fail/E0528.rs create mode 100644 src/test/compile-fail/E0529.rs create mode 100644 src/test/compile-fail/E0530.rs create mode 100644 src/test/compile-fail/E0534.rs create mode 100644 src/test/compile-fail/E0558.rs create mode 100644 src/test/compile-fail/E0559.rs create mode 100644 src/test/compile-fail/E0560.rs create mode 100644 src/test/compile-fail/E0565-1.rs create mode 100644 src/test/compile-fail/E0565.rs create mode 100644 src/test/compile-fail/associated-types/issue-36499.rs create mode 100644 src/test/compile-fail/attr-literals.rs create mode 100644 src/test/compile-fail/auxiliary/extern-statics.rs 
create mode 100644 src/test/compile-fail/auxiliary/issue-36708.rs create mode 100644 src/test/compile-fail/auxiliary/issue_19452_aux.rs delete mode 100644 src/test/compile-fail/borrowck/borrowck-let-suggestion.rs create mode 100644 src/test/compile-fail/borrowck/borrowck-union-borrow-nested.rs create mode 100644 src/test/compile-fail/borrowck/borrowck-union-borrow.rs create mode 100644 src/test/compile-fail/borrowck/borrowck-union-move-assign.rs create mode 100644 src/test/compile-fail/borrowck/borrowck-union-move.rs create mode 100644 src/test/compile-fail/borrowck/borrowck-union-uninitialized.rs create mode 100644 src/test/compile-fail/conflicting-repr-hints.rs create mode 100644 src/test/compile-fail/feature-gate-abi-sysv64.rs create mode 100644 src/test/compile-fail/feature-gate-compiler-builtins.rs create mode 100644 src/test/compile-fail/feature-gate-static-in-const.rs create mode 100644 src/test/compile-fail/gated-attr-literals.rs create mode 100644 src/test/compile-fail/imports/duplicate.rs create mode 100644 src/test/compile-fail/imports/reexports.rs create mode 100644 src/test/compile-fail/imports/unused.rs create mode 100644 src/test/compile-fail/issue-11004.rs create mode 100644 src/test/compile-fail/issue-20433.rs create mode 100644 src/test/compile-fail/issue-24204.rs create mode 100644 src/test/compile-fail/issue-28324.rs create mode 100644 src/test/compile-fail/issue-28625.rs create mode 100644 src/test/compile-fail/issue-35668.rs create mode 100644 src/test/compile-fail/issue-35869.rs create mode 100644 src/test/compile-fail/issue-36053-2.rs create mode 100644 src/test/compile-fail/issue-36299.rs create mode 100644 src/test/compile-fail/issue-36617.rs create mode 100644 src/test/compile-fail/issue-36638.rs create mode 100644 src/test/compile-fail/issue-36708.rs create mode 100644 src/test/compile-fail/issue-5067.rs delete mode 100644 src/test/compile-fail/lint-no-drop-on-repr-extern.rs create mode 100644 src/test/compile-fail/macro-expansion-tests.rs create mode 100644 src/test/compile-fail/missing-block-hint.rs create mode 100644 src/test/compile-fail/move-out-of-slice-1.rs create mode 100644 src/test/compile-fail/paths-in-macro-invocations.rs create mode 100644 src/test/compile-fail/platform-intrinsic-params.rs create mode 100644 src/test/compile-fail/privacy/union-field-privacy-1.rs create mode 100644 src/test/compile-fail/privacy/union-field-privacy-2.rs rename src/test/compile-fail/{enable-orbit-for-incr-comp.rs => question-mark-type-infer.rs} (57%) delete mode 100644 src/test/compile-fail/rfc1592-deprecated.rs create mode 100644 src/test/compile-fail/rfc1623.rs create mode 100644 src/test/compile-fail/safe-extern-statics-mut.rs create mode 100644 src/test/compile-fail/safe-extern-statics.rs delete mode 100644 src/test/compile-fail/traits-inductive-overflow-auto-normal-auto.rs create mode 100644 src/test/compile-fail/typeck-auto-trait-no-supertraits-2.rs create mode 100644 src/test/compile-fail/typeck-auto-trait-no-supertraits.rs create mode 100644 src/test/compile-fail/typeck-auto-trait-no-typeparams.rs delete mode 100644 src/test/compile-fail/typeck-default-trait-impl-superregion.rs delete mode 100644 src/test/compile-fail/typeck-default-trait-impl-supertrait.rs delete mode 100644 src/test/compile-fail/typeck-default-trait-impl-trait-where-clause-2.rs delete mode 100644 src/test/compile-fail/typeck-default-trait-impl-trait-where-clause.rs create mode 100644 src/test/compile-fail/union/union-const-eval.rs create mode 100644 
src/test/compile-fail/union/union-const-pat.rs create mode 100644 src/test/compile-fail/union/union-copy.rs create mode 100644 src/test/compile-fail/union/union-derive-clone.rs create mode 100644 src/test/compile-fail/union/union-derive-eq.rs create mode 100644 src/test/compile-fail/union/union-derive.rs create mode 100644 src/test/compile-fail/union/union-empty.rs create mode 100644 src/test/compile-fail/union/union-feature-gate.rs create mode 100644 src/test/compile-fail/union/union-fields.rs create mode 100644 src/test/compile-fail/union/union-generic.rs create mode 100644 src/test/compile-fail/union/union-lint-dead-code.rs create mode 100644 src/test/compile-fail/union/union-nonrepresentable.rs create mode 100644 src/test/compile-fail/union/union-repr-c.rs create mode 100644 src/test/compile-fail/union/union-suggest-field.rs create mode 100644 src/test/compile-fail/union/union-unsafe.rs create mode 100644 src/test/compile-fail/union/union-unsized.rs create mode 100644 src/test/compile-fail/union/union-with-drop-fields-lint.rs create mode 100644 src/test/compile-fail/user-defined-macro-rules.rs create mode 100644 src/test/debuginfo/auxiliary/macro-stepping.rs create mode 100644 src/test/debuginfo/macro-stepping.rs create mode 100644 src/test/debuginfo/union-smoke.rs create mode 100644 src/test/incremental/cache_file_headers.rs create mode 100644 src/test/incremental/crate_hash_reorder.rs create mode 100644 src/test/incremental/hashes/struct_defs.rs create mode 100644 src/test/incremental/issue-35593.rs create mode 100644 src/test/incremental/krate-inherent.rs create mode 100644 src/test/incremental/krate-inlined.rs create mode 100644 src/test/incremental/remove-private-item-cross-crate/auxiliary/a.rs create mode 100644 src/test/incremental/remove-private-item-cross-crate/main.rs create mode 100644 src/test/incremental/source_loc_macros.rs create mode 100644 src/test/incremental/span_hash_stable/auxiliary/mod.rs create mode 100644 src/test/incremental/span_hash_stable/auxiliary/sub1.rs create mode 100644 src/test/incremental/span_hash_stable/auxiliary/sub2.rs create mode 100644 src/test/incremental/span_hash_stable/main.rs create mode 100644 src/test/incremental/spans_insignificant_w_o_debuginfo.rs create mode 100644 src/test/incremental/spans_significant_w_debuginfo.rs create mode 100644 src/test/parse-fail/doc-after-struct-field.rs create mode 100644 src/test/parse-fail/doc-before-fn-rbrace.rs rename src/test/{compile-fail/type-macros-fail.rs => parse-fail/doc-before-identifier.rs} (74%) create mode 100644 src/test/parse-fail/doc-before-mod-rbrace.rs create mode 100644 src/test/parse-fail/doc-before-struct-rbrace-1.rs create mode 100644 src/test/parse-fail/doc-before-struct-rbrace-2.rs delete mode 100644 src/test/parse-fail/obsolete-closure-kind.rs create mode 100644 src/test/parse-fail/suffixed-literal-meta.rs rename src/test/{parse-fail/non-str-meta.rs => pretty/attr-literals.rs} (66%) rename src/test/run-fail/{overflowing-pow.rs => overflowing-pow-signed.rs} (100%) create mode 100644 src/test/run-fail/overflowing-pow-unsigned.rs create mode 100644 src/test/run-make/link-arg/Makefile create mode 100644 src/test/run-make/link-arg/empty.rs create mode 100644 src/test/run-make/no-builtins-lto/Makefile create mode 100644 src/test/run-make/no-builtins-lto/main.rs create mode 100644 src/test/run-make/no-builtins-lto/no_builtins.rs create mode 100644 src/test/run-pass-fulldeps/auxiliary/cond_noprelude_plugin.rs create mode 100644 src/test/run-pass-fulldeps/auxiliary/cond_plugin.rs create 
mode 100644 src/test/run-pass-fulldeps/auxiliary/cond_prelude_plugin.rs create mode 100644 src/test/run-pass-fulldeps/auxiliary/custom_derive_partial_eq.rs create mode 100644 src/test/run-pass-fulldeps/auxiliary/proc_macro_def.rs create mode 100644 src/test/run-pass-fulldeps/custom-derive-partial-eq.rs create mode 100644 src/test/run-pass-fulldeps/macro-crate-multi-decorator-literals.rs create mode 100644 src/test/run-pass-fulldeps/macro-quote-1.rs create mode 100644 src/test/run-pass-fulldeps/macro-quote-cond.rs create mode 100644 src/test/run-pass-fulldeps/macro-quote-noprelude.rs create mode 100644 src/test/run-pass-fulldeps/macro-quote-prelude.rs create mode 100644 src/test/run-pass-fulldeps/proc_macro.rs create mode 100644 src/test/run-pass-fulldeps/rustc-macro/add-impl.rs create mode 100644 src/test/run-pass-fulldeps/rustc-macro/auxiliary/add-impl.rs create mode 100644 src/test/run-pass-fulldeps/rustc-macro/auxiliary/derive-a.rs create mode 100644 src/test/run-pass-fulldeps/rustc-macro/auxiliary/derive-atob.rs create mode 100644 src/test/run-pass-fulldeps/rustc-macro/auxiliary/derive-ctod.rs create mode 100644 src/test/run-pass-fulldeps/rustc-macro/auxiliary/derive-same-struct.rs create mode 100644 src/test/run-pass-fulldeps/rustc-macro/auxiliary/expand-with-a-macro.rs create mode 100644 src/test/run-pass-fulldeps/rustc-macro/derive-same-struct.rs create mode 100644 src/test/run-pass-fulldeps/rustc-macro/expand-with-a-macro.rs create mode 100644 src/test/run-pass-fulldeps/rustc-macro/load-two.rs create mode 100644 src/test/run-pass-fulldeps/rustc-macro/smoke.rs create mode 100644 src/test/run-pass/abi-sysv64-arg-passing.rs create mode 100644 src/test/run-pass/abi-sysv64-register-usage.rs create mode 100644 src/test/run-pass/assert-ne-macro-success.rs create mode 100644 src/test/run-pass/assert-ne-macro-unsized.rs create mode 100644 src/test/run-pass/atomic-access-bool.rs create mode 100644 src/test/run-pass/cstring-drop.rs delete mode 100644 src/test/run-pass/drop-flag-sanity-check.rs delete mode 100644 src/test/run-pass/drop-flag-skip-sanity-check.rs create mode 100644 src/test/run-pass/imports.rs rename src/test/{compile-fail-fulldeps/plugin-MacroRulesTT.rs => run-pass/issue-12033.rs} (70%) create mode 100644 src/test/run-pass/issue-14875.rs create mode 100644 src/test/run-pass/issue-26251.rs create mode 100644 src/test/run-pass/issue-33687.rs create mode 100644 src/test/run-pass/issue-33992.rs create mode 100644 src/test/run-pass/issue-34053.rs create mode 100644 src/test/run-pass/issue-35423.rs create mode 100644 src/test/run-pass/issue-35815.rs create mode 100644 src/test/run-pass/issue-36053.rs create mode 100644 src/test/run-pass/issue-36075.rs create mode 100644 src/test/run-pass/issue-36139-normalize-closure-sig.rs create mode 100644 src/test/run-pass/issue-36278-prefix-nesting.rs create mode 100644 src/test/run-pass/issue-37020.rs create mode 100644 src/test/run-pass/issue-37109.rs create mode 100644 src/test/run-pass/issue-37222.rs create mode 100644 src/test/run-pass/issue-37291/auxiliary/lib.rs create mode 100644 src/test/run-pass/issue-37291/main.rs create mode 100644 src/test/run-pass/issue-9837.rs create mode 100644 src/test/run-pass/issue36260.rs create mode 100644 src/test/run-pass/iter-sum-overflow-debug.rs create mode 100644 src/test/run-pass/iter-sum-overflow-ndebug.rs rename src/test/run-pass/{attr-no-drop-flag-size.rs => no-drop-flag-size.rs} (83%) create mode 100644 src/test/run-pass/rfc1623.rs create mode 100644 src/test/run-pass/try-operator-custom.rs create 
mode 100644 src/test/run-pass/union/auxiliary/union.rs create mode 100644 src/test/run-pass/union/union-backcomp.rs create mode 100644 src/test/run-pass/union/union-basic.rs create mode 100644 src/test/run-pass/union/union-c-interop.rs create mode 100644 src/test/run-pass/union/union-const-trans.rs create mode 100644 src/test/run-pass/union/union-derive.rs create mode 100644 src/test/run-pass/union/union-drop-assign.rs create mode 100644 src/test/run-pass/union/union-drop.rs create mode 100644 src/test/run-pass/union/union-generic.rs create mode 100644 src/test/run-pass/union/union-inherent-method.rs create mode 100644 src/test/run-pass/union/union-macro.rs create mode 100644 src/test/run-pass/union/union-overwrite.rs create mode 100644 src/test/run-pass/union/union-packed.rs create mode 100644 src/test/run-pass/union/union-pat-refutability.rs rename src/test/run-pass/{rfc1592-deprecated.rs => union/union-trait-impl.rs} (64%) create mode 100644 src/test/run-pass/union/union-transmute.rs create mode 100644 src/test/run-pass/union/union-with-drop-fields-lint.rs create mode 100644 src/test/rustdoc/auxiliary/issue-36031.rs create mode 100644 src/test/rustdoc/issue-35169-2.rs create mode 100644 src/test/rustdoc/issue-35169.rs create mode 100644 src/test/rustdoc/issue-36031.rs create mode 100644 src/test/rustdoc/titles.rs create mode 100644 src/test/rustdoc/union.rs rename src/test/{compile-fail => ui/codemap_tests}/bad-format-args.rs (71%) create mode 100644 src/test/ui/codemap_tests/bad-format-args.stderr rename src/test/{compile-fail => ui/codemap_tests}/issue-28308.rs (75%) create mode 100644 src/test/ui/codemap_tests/issue-28308.stderr create mode 100644 src/test/ui/codemap_tests/repair_span_std_macros.rs create mode 100644 src/test/ui/codemap_tests/repair_span_std_macros.stderr create mode 100644 src/test/ui/cross-crate-macro-backtrace/auxiliary/extern_macro_crate.rs rename src/test/{compile-fail/regions-in-consts.rs => ui/cross-crate-macro-backtrace/main.rs} (76%) create mode 100644 src/test/ui/cross-crate-macro-backtrace/main.stderr create mode 100644 src/test/ui/lifetimes/borrowck-let-suggestion.rs create mode 100644 src/test/ui/lifetimes/borrowck-let-suggestion.stderr create mode 100644 src/test/ui/macros/bad_hello.rs create mode 100644 src/test/ui/macros/bad_hello.stderr rename src/test/{compile-fail => ui/macros}/macro-backtrace-invalid-internals.rs (57%) create mode 100644 src/test/ui/macros/macro-backtrace-invalid-internals.stderr rename src/test/{compile-fail => ui/macros}/macro-backtrace-nested.rs (66%) create mode 100644 src/test/ui/macros/macro-backtrace-nested.stderr rename src/test/{compile-fail => ui/macros}/macro-backtrace-println.rs (66%) create mode 100644 src/test/ui/macros/macro-backtrace-println.stderr rename src/test/{compile-fail/unsafe_no_drop_flag-gate.rs => ui/mismatched_types/const-fn-in-trait.rs} (70%) create mode 100644 src/test/ui/mismatched_types/const-fn-in-trait.stderr create mode 100644 src/test/ui/mismatched_types/issue-35030.rs create mode 100644 src/test/ui/mismatched_types/issue-35030.stderr create mode 100644 src/test/ui/mismatched_types/trait-impl-fn-incompatibility.rs create mode 100644 src/test/ui/mismatched_types/trait-impl-fn-incompatibility.stderr create mode 100644 src/test/ui/span/E0493.rs create mode 100644 src/test/ui/span/E0493.stderr create mode 100644 src/test/ui/span/E0535.rs create mode 100644 src/test/ui/span/E0535.stderr create mode 100644 src/test/ui/span/E0536.rs create mode 100644 src/test/ui/span/E0536.stderr create mode 100644 
src/test/ui/span/E0537.rs create mode 100644 src/test/ui/span/E0537.stderr rename src/test/{compile-fail => ui/span}/issue-11925.rs (89%) create mode 100644 src/test/ui/span/issue-11925.stderr create mode 100644 src/test/ui/span/issue-36530.rs create mode 100644 src/test/ui/span/issue-36530.stderr rename src/test/{compile-fail/move-closure-span.rs => ui/span/move-closure.rs} (89%) create mode 100644 src/test/ui/span/move-closure.stderr rename src/test/{compile-fail/ty_binding_span.rs => ui/span/type-binding.rs} (85%) create mode 100644 src/test/ui/span/type-binding.stderr rename src/test/{compile-fail => ui/span}/typo-suggestion.rs (87%) create mode 100644 src/test/ui/span/typo-suggestion.stderr create mode 100644 version diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6093577078..4c0f93c370 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -151,6 +151,10 @@ Some common make targets are: command above as we only build the stage1 compiler, not the entire thing). You can also leave off the `-rpass` to run all stage1 test types. - `make check-stage1-coretest` - Run stage1 tests in `libcore`. +- `make tidy` - Check that the source code is in compliance with Rust's style + guidelines. There is no official document describing Rust's full guidelines + as of yet, but basic rules like 4 spaces for indentation and no more than 99 + characters in a single line should be kept in mind when writing code. ## Pull Requests @@ -177,6 +181,15 @@ you’re adding something to the standard library, try This will not rebuild the compiler, but will run the tests. +Please make sure your pull request is in compliance with Rust's style +guidelines by running + + $ make tidy + +Make this check before every pull request (and every new commit in a pull +request) ; you can add [git hooks](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks) +before every push to make sure you never forget to make this check. + All pull requests are reviewed by another person. We have a bot, @rust-highfive, that will automatically assign a random person to review your request. @@ -230,7 +243,7 @@ To find documentation-related issues, sort by the [A-docs label][adocs]. In many cases, you don't need a full `make doc`. You can use `rustdoc` directly to check small fixes. For example, `rustdoc src/doc/reference.md` will render reference to `doc/reference.html`. The CSS might be messed up, but you can -verify that HTML is right. +verify that the HTML is right. ## Issue Triage diff --git a/README.md b/README.md index 283efdd241..f2385f3151 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ Read ["Installing Rust"] from [The Book]. * `g++` 4.7 or later or `clang++` 3.x * `python` 2.7 (but not 3.x) * GNU `make` 3.81 or later - * `cmake` 2.8.8 or later + * `cmake` 3.4.3 or later * `curl` * `git` @@ -170,7 +170,7 @@ fetch snapshots, and an OS that can execute the available snapshot binaries. Snapshot binaries are currently built and tested on several platforms: -| Platform \ Architecture | x86 | x86_64 | +| Platform / Architecture | x86 | x86_64 | |--------------------------------|-----|--------| | Windows (7, 8, Server 2008 R2) | ✓ | ✓ | | Linux (2.6.18 or later) | ✓ | ✓ | diff --git a/RELEASES.md b/RELEASES.md index 57aacd8c5e..4e815b9b8e 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,3 +1,287 @@ +Version 1.13.0 (2016-11-10) +=========================== + +Language +-------- + +* [Stabilize the `?` operator][36995]. `?` is a simple way to propagate + errors, like the `try!` macro, described in [RFC 0243]. 
+* [Stabilize macros in type position][36014]. Described in [RFC 873]. +* [Stabilize attributes on statements][36995]. Described in [RFC 0016]. +* [Fix `#[derive]` for empty tuple structs/variants][35728] +* [Fix lifetime rules for 'if' conditions][36029] +* [Avoid loading and parsing unconfigured non-inline modules][36482] + +Compiler +-------- + +* [Add the `-C link-arg` argument][36574] +* [Remove the old AST-based backend from rustc_trans][35764] +* [Don't enable NEON by default on armv7 Linux][35814] +* [Fix debug line number info for macro expansions][35238] +* [Do not emit "class method" debuginfo for types that are not + DICompositeType][36008] +* [Warn about multiple conflicting #[repr] hints][34623] +* [When sizing DST, don't double-count nested struct prefixes][36351] +* [Default RUST_MIN_STACK to 16MiB for now][36505] +* [Improve rlib metadata format][36551]. Reduces rlib size significantly. +* [Reject macros with empty repetitions to avoid infinite loop][36721] +* [Expand macros without recursing to avoid stack overflows][36214] + +Diagnostics +----------- + +* [Replace macro backtraces with labeled local uses][35702] +* [Improve error message for misplaced doc comments][33922] +* [Buffer unix and lock windows to prevent message interleaving][35975] +* [Update lifetime errors to specifically note temporaries][36171] +* [Special case a few colors for Windows][36178] +* [Suggest `use self` when such an import resolves][36289] +* [Be more specific when type parameter shadows primitive type][36338] +* Many minor improvements + +Compile-time Optimizations +-------------------------- + +* [Compute and cache HIR hashes at beginning][35854] +* [Don't hash types in loan paths][36004] +* [Cache projections in trans][35761] +* [Optimize the parser's last token handling][36527] +* [Only instantiate #[inline] functions in codegen units referencing + them][36524]. This leads to big improvements in cases where crates + define many inline functions without using them directly. +* [Lazily allocate TypedArena's first chunk][36592] +* [Don't allocate during default HashSet creation][36734] + +Stabilized APIs +--------------- + +* [`checked_abs`] +* [`wrapping_abs`] +* [`overflowing_abs`] +* [`RefCell::try_borrow`] +* [`RefCell::try_borrow_mut`] + +Libraries +--------- + +* [Add `assert_ne!` and `debug_assert_ne!`][35074] +* [Make `vec_deque::Drain`, `hash_map::Drain`, and `hash_set::Drain` + covariant][35354] +* [Implement `AsRef<[T]>` for `std::slice::Iter`][35559] +* [Implement `Debug` for `std::vec::IntoIter`][35707] +* [`CString`: avoid excessive growth just to 0-terminate][35871] +* [Implement `CoerceUnsized` for `{Cell, RefCell, UnsafeCell}`][35627] +* [Use arc4rand on FreeBSD][35884] +* [memrchr: Correct aligned offset computation][35969] +* [Improve Demangling of Rust Symbols][36059] +* [Use monotonic time in condition variables][35048] +* [Implement `Debug` for `std::path::{Components,Iter}`][36101] +* [Implement conversion traits for `char`][35755] +* [Fix illegal instruction caused by overflow in channel cloning][36104] +* [Zero first byte of CString on drop][36264] +* [Inherit overflow checks for sum and product][36372] +* [Add missing Eq implementations][36423] +* [Implement `Debug` for `DirEntry`][36631] +* [When `getaddrinfo` returns `EAI_SYSTEM` retrieve actual error from + `errno`][36754] +* [`SipHasher`] is deprecated. Use [`DefaultHasher`]. 
+* [Implement more traits for `std::io::ErrorKind`][35911] +* [Optimize BinaryHeap bounds checking][36072] +* [Work around pointer aliasing issue in `Vec::extend_from_slice`, + `extend_with_element`][36355] +* [Fix overflow checking in unsigned pow()][34942] + +Cargo +----- + +* This release includes security fixes to both curl and OpenSSL. +* [Fix transitive doctests when panic=abort][cargo/3021] +* [Add --all-features flag to cargo][cargo/3038] +* [Reject path-based dependencies in `cargo package`][cargo/3060] +* [Don't parse the home directory more than once][cargo/3078] +* [Don't try to generate Cargo.lock on empty workspaces][cargo/3092] +* [Update OpenSSL to 1.0.2j][cargo/3121] +* [Add license and license_file to cargo metadata output][cargo/3110] +* [Make crates-io registry URL optional in config; ignore all changes to + source.crates-io][cargo/3089] +* [Don't download dependencies from other platforms][cargo/3123] +* [Build transitive dev-dependencies when needed][cargo/3125] +* [Add support for per-target rustflags in .cargo/config][cargo/3157] +* [Avoid updating registry when adding existing deps][cargo/3144] +* [Warn about path overrides that won't work][cargo/3136] +* [Use workspaces during `cargo install`][cargo/3146] +* [Leak mspdbsrv.exe processes on Windows][cargo/3162] +* [Add --message-format flag][cargo/3000] +* [Pass target environment for rustdoc][cargo/3205] +* [Use `CommandExt::exec` for `cargo run` on Unix][cargo/2818] +* [Update curl and curl-sys][cargo/3241] +* [Call rustdoc test with the correct cfg flags of a package][cargo/3242] + +Tooling +------- + +* [rustdoc: Add the `--sysroot` argument][36586] +* [rustdoc: Fix a couple of issues with the search results][35655] +* [rustdoc: remove the `!` from macro URLs and titles][35234] +* [gdb: Fix pretty-printing special-cased Rust types][35585] +* [rustdoc: Filter more incorrect methods inherited through Deref][36266] + +Misc +---- + +* [Remove unmaintained style guide][35124] +* [Add s390x support][36369] +* [Initial work at Haiku OS support][36727] +* [Add mips-uclibc targets][35734] +* [Crate-ify compiler-rt into compiler-builtins][35021] +* [Add rustc version info (git hash + date) to dist tarball][36213] +* Many documentation improvements + +Compatibility Notes +------------------- + +* [`SipHasher`] is deprecated. Use [`DefaultHasher`]. +* [Deny (by default) transmuting from fn item types to pointer-sized + types][34923]. Continuing the long transition to zero-sized fn items, + per [RFC 401]. +* [Fix `#[derive]` for empty tuple structs/variants][35728]. + Part of [RFC 1506]. +* [Issue deprecation warnings for safe accesses to extern statics][36173] +* [Fix lifetime rules for 'if' conditions][36029]. +* [Inherit overflow checks for sum and product][36372]. +* [Forbid user-defined macros named "macro_rules"][36730]. 
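[Editor's note: the release notes above describe several user-facing additions. The following minimal sketch (not part of the patch; function and variable names are invented for illustration) shows the newly stabilized `?` operator, the new `assert_ne!` macro, and `RefCell::try_borrow_mut` in use, assuming a 1.13.0 toolchain.]

```rust
use std::cell::RefCell;
use std::num::ParseIntError;

// `?` propagates the error from `str::parse`, replacing the older `try!` macro.
fn double(input: &str) -> Result<i32, ParseIntError> {
    let n: i32 = input.parse()?;
    Ok(n * 2)
}

fn main() {
    // `assert_ne!` is the negated counterpart of `assert_eq!`.
    assert_ne!(double("21"), Ok(0));
    assert_eq!(double("21"), Ok(42));

    // `RefCell::try_borrow_mut` returns an error instead of panicking
    // while another borrow is still alive.
    let cell = RefCell::new(5);
    let shared = cell.borrow();
    assert!(cell.try_borrow_mut().is_err());
    drop(shared);
    assert!(cell.try_borrow_mut().is_ok());
}
```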
+ +[33922]: https://github.com/rust-lang/rust/pull/33922 +[34623]: https://github.com/rust-lang/rust/pull/34623 +[34923]: https://github.com/rust-lang/rust/pull/34923 +[34942]: https://github.com/rust-lang/rust/pull/34942 +[34982]: https://github.com/rust-lang/rust/pull/34982 +[35021]: https://github.com/rust-lang/rust/pull/35021 +[35048]: https://github.com/rust-lang/rust/pull/35048 +[35074]: https://github.com/rust-lang/rust/pull/35074 +[35124]: https://github.com/rust-lang/rust/pull/35124 +[35234]: https://github.com/rust-lang/rust/pull/35234 +[35238]: https://github.com/rust-lang/rust/pull/35238 +[35354]: https://github.com/rust-lang/rust/pull/35354 +[35559]: https://github.com/rust-lang/rust/pull/35559 +[35585]: https://github.com/rust-lang/rust/pull/35585 +[35627]: https://github.com/rust-lang/rust/pull/35627 +[35655]: https://github.com/rust-lang/rust/pull/35655 +[35702]: https://github.com/rust-lang/rust/pull/35702 +[35707]: https://github.com/rust-lang/rust/pull/35707 +[35728]: https://github.com/rust-lang/rust/pull/35728 +[35734]: https://github.com/rust-lang/rust/pull/35734 +[35755]: https://github.com/rust-lang/rust/pull/35755 +[35761]: https://github.com/rust-lang/rust/pull/35761 +[35764]: https://github.com/rust-lang/rust/pull/35764 +[35814]: https://github.com/rust-lang/rust/pull/35814 +[35854]: https://github.com/rust-lang/rust/pull/35854 +[35871]: https://github.com/rust-lang/rust/pull/35871 +[35884]: https://github.com/rust-lang/rust/pull/35884 +[35911]: https://github.com/rust-lang/rust/pull/35911 +[35969]: https://github.com/rust-lang/rust/pull/35969 +[35975]: https://github.com/rust-lang/rust/pull/35975 +[36004]: https://github.com/rust-lang/rust/pull/36004 +[36008]: https://github.com/rust-lang/rust/pull/36008 +[36014]: https://github.com/rust-lang/rust/pull/36014 +[36029]: https://github.com/rust-lang/rust/pull/36029 +[36059]: https://github.com/rust-lang/rust/pull/36059 +[36072]: https://github.com/rust-lang/rust/pull/36072 +[36101]: https://github.com/rust-lang/rust/pull/36101 +[36104]: https://github.com/rust-lang/rust/pull/36104 +[36171]: https://github.com/rust-lang/rust/pull/36171 +[36173]: https://github.com/rust-lang/rust/pull/36173 +[36178]: https://github.com/rust-lang/rust/pull/36178 +[36213]: https://github.com/rust-lang/rust/pull/36213 +[36214]: https://github.com/rust-lang/rust/pull/36214 +[36264]: https://github.com/rust-lang/rust/pull/36264 +[36266]: https://github.com/rust-lang/rust/pull/36266 +[36289]: https://github.com/rust-lang/rust/pull/36289 +[36338]: https://github.com/rust-lang/rust/pull/36338 +[36351]: https://github.com/rust-lang/rust/pull/36351 +[36355]: https://github.com/rust-lang/rust/pull/36355 +[36369]: https://github.com/rust-lang/rust/pull/36369 +[36372]: https://github.com/rust-lang/rust/pull/36372 +[36423]: https://github.com/rust-lang/rust/pull/36423 +[36482]: https://github.com/rust-lang/rust/pull/36482 +[36505]: https://github.com/rust-lang/rust/pull/36505 +[36524]: https://github.com/rust-lang/rust/pull/36524 +[36527]: https://github.com/rust-lang/rust/pull/36527 +[36551]: https://github.com/rust-lang/rust/pull/36551 +[36574]: https://github.com/rust-lang/rust/pull/36574 +[36586]: https://github.com/rust-lang/rust/pull/36586 +[36592]: https://github.com/rust-lang/rust/pull/36592 +[36631]: https://github.com/rust-lang/rust/pull/36631 +[36639]: https://github.com/rust-lang/rust/pull/36639 +[36721]: https://github.com/rust-lang/rust/pull/36721 +[36727]: https://github.com/rust-lang/rust/pull/36727 +[36730]: 
https://github.com/rust-lang/rust/pull/36730 +[36734]: https://github.com/rust-lang/rust/pull/36734 +[36754]: https://github.com/rust-lang/rust/pull/36754 +[36995]: https://github.com/rust-lang/rust/pull/36995 +[RFC 0016]: https://github.com/rust-lang/rfcs/blob/master/text/0016-more-attributes.md +[RFC 0243]: https://github.com/rust-lang/rfcs/blob/master/text/0243-trait-based-exception-handling.md +[RFC 1506]: https://github.com/rust-lang/rfcs/blob/master/text/1506-adt-kinds.md +[RFC 401]: https://github.com/rust-lang/rfcs/blob/master/text/0401-coercions.md +[RFC 873]: https://github.com/rust-lang/rfcs/blob/master/text/0873-type-macros.md +[cargo/2818]: https://github.com/rust-lang/cargo/pull/2818 +[cargo/3000]: https://github.com/rust-lang/cargo/pull/3000 +[cargo/3021]: https://github.com/rust-lang/cargo/pull/3021 +[cargo/3038]: https://github.com/rust-lang/cargo/pull/3038 +[cargo/3060]: https://github.com/rust-lang/cargo/pull/3060 +[cargo/3078]: https://github.com/rust-lang/cargo/pull/3078 +[cargo/3089]: https://github.com/rust-lang/cargo/pull/3089 +[cargo/3092]: https://github.com/rust-lang/cargo/pull/3092 +[cargo/3110]: https://github.com/rust-lang/cargo/pull/3110 +[cargo/3121]: https://github.com/rust-lang/cargo/pull/3121 +[cargo/3123]: https://github.com/rust-lang/cargo/pull/3123 +[cargo/3125]: https://github.com/rust-lang/cargo/pull/3125 +[cargo/3136]: https://github.com/rust-lang/cargo/pull/3136 +[cargo/3144]: https://github.com/rust-lang/cargo/pull/3144 +[cargo/3146]: https://github.com/rust-lang/cargo/pull/3146 +[cargo/3157]: https://github.com/rust-lang/cargo/pull/3157 +[cargo/3162]: https://github.com/rust-lang/cargo/pull/3162 +[cargo/3205]: https://github.com/rust-lang/cargo/pull/3205 +[cargo/3241]: https://github.com/rust-lang/cargo/pull/3241 +[cargo/3242]: https://github.com/rust-lang/cargo/pull/3242 +[rustup]: https://www.rustup.rs +[`checked_abs`]: https://doc.rust-lang.org/std/primitive.i32.html#method.checked_abs +[`wrapping_abs`]: https://doc.rust-lang.org/std/primitive.i32.html#method.wrapping_abs +[`overflowing_abs`]: https://doc.rust-lang.org/std/primitive.i32.html#method.overflowing_abs +[`RefCell::try_borrow`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html#method.try_borrow +[`RefCell::try_borrow_mut`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html#method.try_borrow_mut +[`SipHasher`]: https://doc.rust-lang.org/std/hash/struct.SipHasher.html +[`DefaultHasher`]: https://doc.rust-lang.org/std/collections/hash_map/struct.DefaultHasher.html + + +Version 1.12.1 (2016-10-20) +=========================== + +Regression Fixes +---------------- + +* [ICE: 'rustc' panicked at 'assertion failed: concrete_substs.is_normalized_for_trans()' #36381][36381] +* [Confusion with double negation and booleans][36856] +* [rustc 1.12.0 fails with SIGSEGV in release mode (syn crate 0.8.0)][36875] +* [Rustc 1.12.0 Windows build of `ethcore` crate fails with LLVM error][36924] +* [1.12.0: High memory usage when linking in release mode with debug info][36926] +* [Corrupted memory after updated to 1.12][36936] +* ["Let NullaryConstructor = something;" causes internal compiler error: "tried to overwrite interned AdtDef"][37026] +* [Fix ICE: inject bitcast if types mismatch for invokes/calls/stores][37112] +* [debuginfo: Handle spread_arg case in MIR-trans in a more stable way.][37153] + +[36381]: https://github.com/rust-lang/rust/issues/36381 +[36856]: https://github.com/rust-lang/rust/issues/36856 +[36875]: https://github.com/rust-lang/rust/issues/36875 +[36924]: 
https://github.com/rust-lang/rust/issues/36924 +[36926]: https://github.com/rust-lang/rust/issues/36926 +[36936]: https://github.com/rust-lang/rust/issues/36936 +[37026]: https://github.com/rust-lang/rust/issues/37026 +[37112]: https://github.com/rust-lang/rust/issues/37112 +[37153]: https://github.com/rust-lang/rust/issues/37153 + + Version 1.12.0 (2016-09-29) =========================== @@ -27,7 +311,7 @@ Compiler was previously described [on the Rust blog] (https://blog.rust-lang.org/2016/04/19/MIR.html). * [Print the Rust target name, not the LLVM target name, with - `--print-target-list`] + `--print target-list`] (https://github.com/rust-lang/rust/pull/35489) * [The computation of `TypeId` is correct in some cases where it was previously producing inconsistent results] @@ -74,7 +358,7 @@ Language useful] (https://github.com/rust-lang/rust/pull/34908) * [`macro_rules!` `stmt` matchers correctly consume the entire contents when - insider non-braces invocations] + inside non-braces invocations] (https://github.com/rust-lang/rust/pull/34886) * [Semicolons are properly required as statement delimeters inside `macro_rules!` invocations] @@ -208,9 +492,6 @@ Cargo Performance ----------- -* [`rustc` produces more compact code by more precisely identifying the live - ranges of variables] - (https://github.com/rust-lang/rust/pull/35409) * [`panic::catch_unwind` is more optimized] (https://github.com/rust-lang/rust/pull/35444) * [`panic::catch_unwind` no longer accesses thread-local storage on entry] diff --git a/configure b/configure index 18dc99dd6c..fd0397e5f8 100755 --- a/configure +++ b/configure @@ -360,6 +360,13 @@ abs_path() { (unset CDPATH && cd "$_path" > /dev/null && pwd) } +HELP=0 +for arg; do + case "$arg" in + --help) HELP=1;; + esac +done + msg "looking for configure programs" need_cmd cmp need_cmd mkdir @@ -430,6 +437,10 @@ case $CFG_OSTYPE in CFG_CPUTYPE=$(isainfo -n) ;; + Haiku) + CFG_OSTYPE=unknown-haiku + ;; + MINGW*) # msys' `uname` does not print gcc configuration, but prints msys # configuration. 
so we cannot believe `uname -m`: @@ -517,10 +528,18 @@ case $CFG_CPUTYPE in CFG_CPUTYPE=powerpc64le ;; + s390x) + CFG_CPUTYPE=s390x + ;; + x86_64 | x86-64 | x64 | amd64) CFG_CPUTYPE=x86_64 ;; + BePC) + CFG_CPUTYPE=i686 + ;; + *) err "unknown CPU type: $CFG_CPUTYPE" esac @@ -566,11 +585,8 @@ esac OPTIONS="" -HELP=0 -if [ "$1" = "--help" ] +if [ "$HELP" -eq 1 ] then - HELP=1 - shift echo echo "Usage: $CFG_SELF [options]" echo @@ -609,7 +625,6 @@ opt dist-host-only 0 "only install bins for the host architecture" opt inject-std-version 1 "inject the current compiler version of libstd into programs" opt llvm-version-check 1 "check if the LLVM version is supported, build anyway" opt rustbuild 0 "use the rust and cargo based build system" -opt orbit 1 "get MIR where it belongs - everywhere; most importantly, in orbit" opt codegen-tests 1 "run the src/test/codegen tests" opt option-checking 1 "complain about unrecognized options in this configure script" opt ninja 0 "build LLVM using the Ninja generator (for MSVC, requires building in the correct environment)" @@ -630,6 +645,7 @@ valopt datadir "${CFG_PREFIX}/share" "install data" valopt infodir "${CFG_PREFIX}/share/info" "install additional info" valopt llvm-root "" "set LLVM root" valopt python "" "set path to python" +valopt nodejs "" "set path to nodejs" valopt jemalloc-root "" "set directory where libjemalloc_pic.a is located" valopt build "${DEFAULT_BUILD}" "GNUs ./configure syntax LLVM build triple" valopt android-cross-path "" "Android NDK standalone path (deprecated)" @@ -668,6 +684,7 @@ valopt_nosave local-rust-root "/usr/local" "set prefix for local rust binary" valopt_nosave host "${CFG_BUILD}" "GNUs ./configure syntax LLVM host triples" valopt_nosave target "${CFG_HOST}" "GNUs ./configure syntax LLVM target triples" valopt_nosave mandir "${CFG_PREFIX}/share/man" "install man pages in PATH" +valopt_nosave docdir "${CFG_PREFIX}/share/doc/rust" "install documentation in PATH" # On Windows this determines root of the subtree for target libraries. # Host runtime libs always go to 'bin'. @@ -733,8 +750,6 @@ if [ -n "$CFG_ENABLE_DEBUG_ASSERTIONS" ]; then putvar CFG_ENABLE_DEBUG_ASSERTION if [ -n "$CFG_ENABLE_DEBUGINFO" ]; then putvar CFG_ENABLE_DEBUGINFO; fi if [ -n "$CFG_ENABLE_DEBUG_JEMALLOC" ]; then putvar CFG_ENABLE_DEBUG_JEMALLOC; fi -if [ -n "$CFG_DISABLE_ORBIT" ]; then putvar CFG_DISABLE_ORBIT; fi - step_msg "looking for build programs" probe_need CFG_CURL curl @@ -747,6 +762,9 @@ if [ $(echo $python_version | grep -c '^Python 2\.7') -ne 1 ]; then err "Found $python_version, but Python 2.7 is required" fi +# Checking for node, but not required +probe CFG_NODEJS nodejs node + # If we have no git directory then we are probably a tarball distribution # and shouldn't attempt to load submodules if [ ! -e ${CFG_SRC_DIR}.git ] @@ -899,7 +917,7 @@ then fi CMD="${CFG_LOCAL_RUST_ROOT}/bin/rustc${BIN_SUF}" - LRV=`$CMD --version` + LRV=`LD_LIBRARY_PATH=${CFG_LOCAL_RUST_ROOT}/lib $CMD --version` if [ $? 
-ne 0 ] then step_msg "failure while running $CMD --version" @@ -1115,6 +1133,7 @@ putvar CFG_STDCPP_NAME # a little post-processing of various config values CFG_PREFIX=${CFG_PREFIX%/} CFG_MANDIR=${CFG_MANDIR%/} +CFG_DOCDIR=${CFG_DOCDIR%/} CFG_HOST="$(echo $CFG_HOST | tr ',' ' ')" CFG_TARGET="$(echo $CFG_TARGET | tr ',' ' ')" CFG_SUPPORTED_TARGET="" @@ -1749,7 +1768,7 @@ do CMAKE_ARGS="$CMAKE_ARGS -DLLVM_ENABLE_ASSERTIONS=ON" fi - CMAKE_ARGS="$CMAKE_ARGS -DLLVM_TARGETS_TO_BUILD='X86;ARM;AArch64;Mips;PowerPC'" + CMAKE_ARGS="$CMAKE_ARGS -DLLVM_TARGETS_TO_BUILD='X86;ARM;AArch64;Mips;PowerPC;SystemZ'" CMAKE_ARGS="$CMAKE_ARGS -G '$CFG_CMAKE_GENERATOR'" CMAKE_ARGS="$CMAKE_ARGS $CFG_LLVM_SRC_DIR" @@ -1796,6 +1815,7 @@ putvar CFG_ARMV7_LINUX_ANDROIDEABI_NDK putvar CFG_I686_LINUX_ANDROID_NDK putvar CFG_NACL_CROSS_PATH putvar CFG_MANDIR +putvar CFG_DOCDIR putvar CFG_USING_LIBCPP # Avoid spurious warnings from clang by feeding it original source on diff --git a/man/rustc.1 b/man/rustc.1 index 0eaf89a560..1656255956 100644 --- a/man/rustc.1 +++ b/man/rustc.1 @@ -1,4 +1,4 @@ -.TH RUSTC "1" "August 2016" "rustc 1.12.0" "User Commands" +.TH RUSTC "1" "September 2016" "rustc 1.13.0" "User Commands" .SH NAME rustc \- The Rust compiler .SH SYNOPSIS diff --git a/man/rustdoc.1 b/man/rustdoc.1 index 3fb5757f4f..4d885bd143 100644 --- a/man/rustdoc.1 +++ b/man/rustdoc.1 @@ -1,4 +1,4 @@ -.TH RUSTDOC "1" "August 2016" "rustdoc 1.12.0" "User Commands" +.TH RUSTDOC "1" "September 2016" "rustdoc 1.13.0" "User Commands" .SH NAME rustdoc \- generate documentation from Rust source code .SH SYNOPSIS diff --git a/mk/cfg/i686-unknown-haiku.mk b/mk/cfg/i686-unknown-haiku.mk new file mode 100644 index 0000000000..cbacbff070 --- /dev/null +++ b/mk/cfg/i686-unknown-haiku.mk @@ -0,0 +1,27 @@ +# i686-unknown-haiku configuration +CROSS_PREFIX_i686-unknown-haiku=i586-pc-haiku- +CC_i686-unknown-haiku=$(CC) +CXX_i686-unknown-haiku=$(CXX) +CPP_i686-unknown-haiku=$(CPP) +AR_i686-unknown-haiku=$(AR) +CFG_LIB_NAME_i686-unknown-haiku=lib$(1).so +CFG_STATIC_LIB_NAME_i686-unknown-haiku=lib$(1).a +CFG_LIB_GLOB_i686-unknown-haiku=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_i686-unknown-haiku=lib$(1)-*.dylib.dSYM +CFG_CFLAGS_i686-unknown-haiku := -m32 $(CFLAGS) +CFG_GCCISH_CFLAGS_i686-unknown-haiku := -Wall -Werror -g -fPIC -m32 $(CFLAGS) +CFG_GCCISH_CXXFLAGS_i686-unknown-haiku := -fno-rtti $(CXXFLAGS) +CFG_GCCISH_LINK_FLAGS_i686-unknown-haiku := -shared -fPIC -ldl -pthread -lrt -g -m32 +CFG_GCCISH_PRE_LIB_FLAGS_i686-unknown-haiku := -Wl,-whole-archive +CFG_GCCISH_POST_LIB_FLAGS_i686-unknown-haiku := -Wl,-no-whole-archive +CFG_DEF_SUFFIX_i686-unknown-haiku := .linux.def +CFG_LLC_FLAGS_i686-unknown-haiku := +CFG_INSTALL_NAME_i686-unknown-haiku = +CFG_EXE_SUFFIX_i686-unknown-haiku = +CFG_WINDOWSY_i686-unknown-haiku := +CFG_UNIXY_i686-unknown-haiku := 1 +CFG_PATH_MUNGE_i686-unknown-haiku := true +CFG_LDPATH_i686-unknown-haiku := +CFG_RUN_i686-unknown-haiku=$(2) +CFG_RUN_TARG_i686-unknown-haiku=$(call CFG_RUN_i686-unknown-haiku,,$(2)) +CFG_GNU_TRIPLE_i686-unknown-haiku := i686-unknown-haiku diff --git a/mk/cfg/mips-unknown-linux-uclibc.mk b/mk/cfg/mips-unknown-linux-uclibc.mk new file mode 100644 index 0000000000..34aee77ae2 --- /dev/null +++ b/mk/cfg/mips-unknown-linux-uclibc.mk @@ -0,0 +1 @@ +# rustbuild-only target diff --git a/mk/cfg/mips64-unknown-linux-gnuabi64.mk b/mk/cfg/mips64-unknown-linux-gnuabi64.mk new file mode 100644 index 0000000000..34aee77ae2 --- /dev/null +++ b/mk/cfg/mips64-unknown-linux-gnuabi64.mk @@ -0,0 +1 @@ +# rustbuild-only target 
diff --git a/mk/cfg/mips64el-unknown-linux-gnuabi64.mk b/mk/cfg/mips64el-unknown-linux-gnuabi64.mk new file mode 100644 index 0000000000..34aee77ae2 --- /dev/null +++ b/mk/cfg/mips64el-unknown-linux-gnuabi64.mk @@ -0,0 +1 @@ +# rustbuild-only target diff --git a/mk/cfg/mipsel-unknown-linux-uclibc.mk b/mk/cfg/mipsel-unknown-linux-uclibc.mk new file mode 100644 index 0000000000..34aee77ae2 --- /dev/null +++ b/mk/cfg/mipsel-unknown-linux-uclibc.mk @@ -0,0 +1 @@ +# rustbuild-only target diff --git a/mk/cfg/s390x-unknown-linux-gnu.mk b/mk/cfg/s390x-unknown-linux-gnu.mk new file mode 100644 index 0000000000..eb1cb2329c --- /dev/null +++ b/mk/cfg/s390x-unknown-linux-gnu.mk @@ -0,0 +1,24 @@ +# s390x-unknown-linux-gnu configuration +CROSS_PREFIX_s390x-unknown-linux-gnu=s390x-linux-gnu- +CC_s390x-unknown-linux-gnu=$(CC) +CXX_s390x-unknown-linux-gnu=$(CXX) +CPP_s390x-unknown-linux-gnu=$(CPP) +AR_s390x-unknown-linux-gnu=$(AR) +CFG_LIB_NAME_s390x-unknown-linux-gnu=lib$(1).so +CFG_STATIC_LIB_NAME_s390x-unknown-linux-gnu=lib$(1).a +CFG_LIB_GLOB_s390x-unknown-linux-gnu=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_s390x-unknown-linux-gnu=lib$(1)-*.dylib.dSYM +CFG_CFLAGS_s390x-unknown-linux-gnu := -m64 $(CFLAGS) +CFG_GCCISH_CFLAGS_s390x-unknown-linux-gnu := -g -fPIC -m64 $(CFLAGS) +CFG_GCCISH_CXXFLAGS_s390x-unknown-linux-gnu := -fno-rtti $(CXXFLAGS) +CFG_GCCISH_LINK_FLAGS_s390x-unknown-linux-gnu := -shared -fPIC -ldl -pthread -lrt -g -m64 +CFG_GCCISH_DEF_FLAG_s390x-unknown-linux-gnu := -Wl,--export-dynamic,--dynamic-list= +CFG_LLC_FLAGS_s390x-unknown-linux-gnu := +CFG_INSTALL_NAME_s390x-unknown-linux-gnu = +CFG_EXE_SUFFIX_s390x-unknown-linux-gnu = +CFG_WINDOWSY_s390x-unknown-linux-gnu := +CFG_UNIXY_s390x-unknown-linux-gnu := 1 +CFG_LDPATH_s390x-unknown-linux-gnu := +CFG_RUN_s390x-unknown-linux-gnu=$(2) +CFG_RUN_TARG_s390x-unknown-linux-gnu=$(call CFG_RUN_s390x-unknown-linux-gnu,,$(2)) +CFG_GNU_TRIPLE_s390x-unknown-linux-gnu := s390x-unknown-linux-gnu diff --git a/mk/cfg/x86_64-unknown-haiku.mk b/mk/cfg/x86_64-unknown-haiku.mk new file mode 100644 index 0000000000..4c2d888be0 --- /dev/null +++ b/mk/cfg/x86_64-unknown-haiku.mk @@ -0,0 +1,27 @@ +# x86_64-unknown-haiku configuration +CROSS_PREFIX_x86_64-unknown-haiku=x86_64-unknown-haiku- +CC_x86_64-unknown-haiku=$(CC) +CXX_x86_64-unknown-haiku=$(CXX) +CPP_x86_64-unknown-haiku=$(CPP) +AR_x86_64-unknown-haiku=$(AR) +CFG_LIB_NAME_x86_64-unknown-haiku=lib$(1).so +CFG_STATIC_LIB_NAME_x86_64-unknown-haiku=lib$(1).a +CFG_LIB_GLOB_x86_64-unknown-haiku=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_x86_64-unknown-haiku=lib$(1)-*.dylib.dSYM +CFG_CFLAGS_x86_64-unknown-haiku := -m64 $(CFLAGS) +CFG_GCCISH_CFLAGS_x86_64-unknown-haiku := -Wall -Werror -g -fPIC -m64 $(CFLAGS) +CFG_GCCISH_CXXFLAGS_x86_64-unknown-haiku := -fno-rtti $(CXXFLAGS) +CFG_GCCISH_LINK_FLAGS_x86_64-unknown-haiku := -shared -fPIC -ldl -pthread -lrt -g -m64 +CFG_GCCISH_PRE_LIB_FLAGS_x86_64-unknown-haiku := -Wl,-whole-archive +CFG_GCCISH_POST_LIB_FLAGS_x86_64-unknown-haiku := -Wl,-no-whole-archive +CFG_DEF_SUFFIX_x86_64-unknown-haiku := .linux.def +CFG_LLC_FLAGS_x86_64-unknown-haiku := +CFG_INSTALL_NAME_x86_64-unknown-haiku = +CFG_EXE_SUFFIX_x86_64-unknown-haiku = +CFG_WINDOWSY_x86_64-unknown-haiku := +CFG_UNIXY_x86_64-unknown-haiku := 1 +CFG_PATH_MUNGE_x86_64-unknown-haiku := true +CFG_LDPATH_x86_64-unknown-haiku := +CFG_RUN_x86_64-unknown-haiku=$(2) +CFG_RUN_TARG_x86_64-unknown-haiku=$(call CFG_RUN_x86_64-unknown-haiku,,$(2)) +CFG_GNU_TRIPLE_x86_64-unknown-haiku := x86_64-unknown-haiku diff --git a/mk/clean.mk b/mk/clean.mk index 
ac34ac506b..3574f25d9b 100644 --- a/mk/clean.mk +++ b/mk/clean.mk @@ -102,7 +102,6 @@ define CLEAN_TARGET_STAGE_N clean$(1)_T_$(2)_H_$(3): \ $$(foreach crate,$$(CRATES),clean$(1)_T_$(2)_H_$(3)-lib-$$(crate)) \ $$(foreach tool,$$(TOOLS) $$(DEBUGGER_BIN_SCRIPTS_ALL),clean$(1)_T_$(2)_H_$(3)-tool-$$(tool)) - $$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/libcompiler-rt.a $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/librun_pass_stage* # For unix $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/run_pass_stage* # For windows diff --git a/mk/crates.mk b/mk/crates.mk index 0bd0c70bd0..86bb3a8ca0 100644 --- a/mk/crates.mk +++ b/mk/crates.mk @@ -51,7 +51,7 @@ TARGET_CRATES := libc std term \ getopts collections test rand \ - core alloc \ + compiler_builtins core alloc \ rustc_unicode rustc_bitflags \ alloc_system alloc_jemalloc \ panic_abort panic_unwind unwind @@ -59,12 +59,13 @@ RUSTC_CRATES := rustc rustc_typeck rustc_mir rustc_borrowck rustc_resolve rustc_ rustc_trans rustc_back rustc_llvm rustc_privacy rustc_lint \ rustc_data_structures rustc_platform_intrinsics rustc_errors \ rustc_plugin rustc_metadata rustc_passes rustc_save_analysis \ - rustc_const_eval rustc_const_math rustc_incremental -HOST_CRATES := syntax syntax_ext syntax_pos $(RUSTC_CRATES) rustdoc fmt_macros \ - flate arena graphviz rbml log serialize + rustc_const_eval rustc_const_math rustc_incremental rustc_macro +HOST_CRATES := syntax syntax_ext proc_macro syntax_pos $(RUSTC_CRATES) rustdoc fmt_macros \ + flate arena graphviz log serialize TOOLS := compiletest rustdoc rustc rustbook error_index_generator DEPS_core := +DEPS_compiler_builtins := core DEPS_alloc := core libc alloc_system DEPS_alloc_system := core libc DEPS_alloc_jemalloc := core libc native:jemalloc @@ -77,12 +78,14 @@ DEPS_panic_abort := libc alloc DEPS_panic_unwind := libc alloc unwind DEPS_unwind := libc +RUSTFLAGS_compiler_builtins := -lstatic=compiler-rt + # FIXME(stage0): change this to just `RUSTFLAGS_panic_abort := ...` RUSTFLAGS1_panic_abort := -C panic=abort RUSTFLAGS2_panic_abort := -C panic=abort RUSTFLAGS3_panic_abort := -C panic=abort -DEPS_std := core libc rand alloc collections rustc_unicode \ +DEPS_std := core libc rand alloc collections compiler_builtins rustc_unicode \ native:backtrace \ alloc_system panic_abort panic_unwind unwind DEPS_arena := std @@ -93,34 +96,37 @@ DEPS_getopts := std DEPS_graphviz := std DEPS_log := std DEPS_num := std -DEPS_rbml := std log serialize DEPS_serialize := std log DEPS_term := std DEPS_test := std getopts term native:rust_test_helpers DEPS_syntax := std term serialize log arena libc rustc_bitflags rustc_unicode rustc_errors syntax_pos -DEPS_syntax_ext := syntax syntax_pos rustc_errors fmt_macros +DEPS_syntax_ext := syntax syntax_pos rustc_errors fmt_macros rustc_macro +DEPS_proc_macro := syntax syntax_pos rustc_plugin log DEPS_syntax_pos := serialize DEPS_rustc_const_math := std syntax log serialize DEPS_rustc_const_eval := rustc_const_math rustc syntax log serialize \ rustc_back graphviz syntax_pos -DEPS_rustc := syntax fmt_macros flate arena serialize getopts rbml \ +DEPS_rustc := syntax fmt_macros flate arena serialize getopts \ log graphviz rustc_llvm rustc_back rustc_data_structures\ rustc_const_math syntax_pos rustc_errors DEPS_rustc_back := std syntax flate log libc DEPS_rustc_borrowck := rustc log graphviz syntax syntax_pos rustc_errors rustc_mir -DEPS_rustc_data_structures := std log serialize +DEPS_rustc_data_structures := std log serialize libc DEPS_rustc_driver := arena flate getopts graphviz libc rustc rustc_back rustc_borrowck \ 
rustc_typeck rustc_mir rustc_resolve log syntax serialize rustc_llvm \ - rustc_trans rustc_privacy rustc_lint rustc_plugin \ - rustc_metadata syntax_ext rustc_passes rustc_save_analysis rustc_const_eval \ - rustc_incremental syntax_pos rustc_errors + rustc_trans rustc_privacy rustc_lint rustc_plugin \ + rustc_metadata syntax_ext proc_macro \ + rustc_passes rustc_save_analysis rustc_const_eval \ + rustc_incremental syntax_pos rustc_errors rustc_macro DEPS_rustc_errors := log libc serialize syntax_pos DEPS_rustc_lint := rustc log syntax syntax_pos rustc_const_eval DEPS_rustc_llvm := native:rustllvm libc std rustc_bitflags -DEPS_rustc_metadata := rustc syntax syntax_pos rustc_errors rbml rustc_const_math +DEPS_rustc_macro := std syntax +DEPS_rustc_metadata := rustc syntax syntax_pos rustc_errors rustc_const_math \ + rustc_macro syntax_ext DEPS_rustc_passes := syntax syntax_pos rustc core rustc_const_eval rustc_errors DEPS_rustc_mir := rustc syntax syntax_pos rustc_const_math rustc_const_eval rustc_bitflags DEPS_rustc_resolve := arena rustc log syntax syntax_pos rustc_errors @@ -130,14 +136,13 @@ DEPS_rustc_privacy := rustc log syntax syntax_pos DEPS_rustc_trans := arena flate getopts graphviz libc rustc rustc_back \ log syntax serialize rustc_llvm rustc_platform_intrinsics \ rustc_const_math rustc_const_eval rustc_incremental rustc_errors syntax_pos -DEPS_rustc_incremental := rbml rustc syntax_pos serialize rustc_data_structures +DEPS_rustc_incremental := rustc syntax_pos serialize rustc_data_structures DEPS_rustc_save_analysis := rustc log syntax syntax_pos serialize DEPS_rustc_typeck := rustc syntax syntax_pos rustc_platform_intrinsics rustc_const_math \ rustc_const_eval rustc_errors -DEPS_rustdoc := rustc rustc_driver native:hoedown serialize getopts \ - test rustc_lint rustc_const_eval syntax_pos - +DEPS_rustdoc := rustc rustc_driver native:hoedown serialize getopts test \ + rustc_lint rustc_const_eval syntax_pos rustc_data_structures TOOL_DEPS_compiletest := test getopts log serialize TOOL_DEPS_rustdoc := rustdoc @@ -150,6 +155,7 @@ TOOL_SOURCE_rustc := $(S)src/driver/driver.rs TOOL_SOURCE_rustbook := $(S)src/tools/rustbook/main.rs TOOL_SOURCE_error_index_generator := $(S)src/tools/error_index_generator/main.rs +ONLY_RLIB_compiler_builtins := 1 ONLY_RLIB_core := 1 ONLY_RLIB_libc := 1 ONLY_RLIB_alloc := 1 diff --git a/mk/dist.mk b/mk/dist.mk index e81371037a..cb0bca01e6 100644 --- a/mk/dist.mk +++ b/mk/dist.mk @@ -76,6 +76,7 @@ tmp/dist/$$(SRC_PKG_NAME)-image: $(PKG_FILES) @$(call E, making src image) $(Q)rm -Rf tmp/dist/$(SRC_PKG_NAME)-image $(Q)mkdir -p tmp/dist/$(SRC_PKG_NAME)-image/lib/rustlib/src/rust + $(Q)echo "$(CFG_VERSION)" > tmp/dist/$(SRC_PKG_NAME)-image/lib/rustlib/src/rust/version $(Q)tar \ -C $(S) \ -f - \ diff --git a/mk/docs.mk b/mk/docs.mk index f202c75360..6c0be654e1 100644 --- a/mk/docs.mk +++ b/mk/docs.mk @@ -66,7 +66,7 @@ ERR_IDX_GEN_MD = $(RPATH_VAR2_T_$(CFG_BUILD)_H_$(CFG_BUILD)) $(ERR_IDX_GEN_EXE) D := $(S)src/doc -DOC_TARGETS := book nomicon style error-index +DOC_TARGETS := book nomicon error-index COMPILER_DOC_TARGETS := DOC_L10N_TARGETS := @@ -209,13 +209,6 @@ doc/nomicon/index.html: $(RUSTBOOK_EXE) $(wildcard $(S)/src/doc/nomicon/*.md) | $(Q)rm -rf doc/nomicon $(Q)$(RUSTBOOK) build $(S)src/doc/nomicon doc/nomicon -style: doc/style/index.html - -doc/style/index.html: $(RUSTBOOK_EXE) $(wildcard $(S)/src/doc/style/*.md) | doc/ - @$(call E, rustbook: $@) - $(Q)rm -rf doc/style - $(Q)$(RUSTBOOK) build $(S)src/doc/style doc/style - error-index: doc/error-index.html 
# Metadata used to generate the index is created as a side effect of diff --git a/mk/install.mk b/mk/install.mk index d2e5449a2f..be212869f0 100644 --- a/mk/install.mk +++ b/mk/install.mk @@ -12,7 +12,8 @@ RUN_INSTALLER = cd tmp/empty_dir && \ sh ../../tmp/dist/$(1)/install.sh \ --prefix="$(DESTDIR)$(CFG_PREFIX)" \ --libdir="$(DESTDIR)$(CFG_LIBDIR)" \ - --mandir="$(DESTDIR)$(CFG_MANDIR)" + --mandir="$(DESTDIR)$(CFG_MANDIR)" \ + --docdir="$(DESTDIR)$(CFG_DOCDIR)" install: ifeq (root user, $(USER) $(patsubst %,user,$(SUDO_USER))) diff --git a/mk/main.mk b/mk/main.mk index 171ded4230..45c085c27c 100644 --- a/mk/main.mk +++ b/mk/main.mk @@ -13,12 +13,12 @@ ###################################################################### # The version number -CFG_RELEASE_NUM=1.12.1 +CFG_RELEASE_NUM=1.13.0 # An optional number to put after the label, e.g. '.2' -> '-beta.2' # NB Make sure it starts with a dot to conform to semver pre-release # versions (section 9) -CFG_PRERELEASE_VERSION=.6 +CFG_PRERELEASE_VERSION=.3 ifeq ($(CFG_RELEASE_CHANNEL),stable) # This is the normal semver version string, e.g. "0.12.0", "0.12.0-nightly" @@ -53,17 +53,6 @@ endif # versions in the same place CFG_FILENAME_EXTRA=$(shell printf '%s' $(CFG_RELEASE)$(CFG_EXTRA_FILENAME) | $(CFG_HASH_COMMAND)) -# A magic value that allows the compiler to use unstable features during the -# bootstrap even when doing so would normally be an error because of feature -# staging or because the build turns on warnings-as-errors and unstable features -# default to warnings. The build has to match this key in an env var. -# -# This value is keyed off the release to ensure that all compilers for one -# particular release have the same bootstrap key. Note that this is -# intentionally not "secure" by any definition, this is largely just a deterrent -# from users enabling unstable features on the stable compiler. -CFG_BOOTSTRAP_KEY=$(CFG_FILENAME_EXTRA) - # If local-rust is the same as the current version, then force a local-rebuild ifdef CFG_ENABLE_LOCAL_RUST ifeq ($(CFG_RELEASE),\ @@ -73,14 +62,6 @@ ifeq ($(CFG_RELEASE),\ endif endif -# The stage0 compiler needs to use the previous key recorded in src/stage0.txt, -# except for local-rebuild when it just uses the same current key. -ifdef CFG_ENABLE_LOCAL_REBUILD -CFG_BOOTSTRAP_KEY_STAGE0=$(CFG_BOOTSTRAP_KEY) -else -CFG_BOOTSTRAP_KEY_STAGE0=$(shell sed -ne 's/^rustc_key: //p' $(S)src/stage0.txt) -endif - # The name of the package to use for creating tarballs, installers etc. 
CFG_PACKAGE_NAME=rustc-$(CFG_PACKAGE_VERS) @@ -162,12 +143,6 @@ ifdef CFG_ENABLE_DEBUGINFO CFG_RUSTC_FLAGS += -g endif -ifdef CFG_DISABLE_ORBIT - $(info cfg: HOLD HOLD HOLD (CFG_DISABLE_ORBIT)) - RUSTFLAGS_STAGE1 += -Z orbit=off - RUSTFLAGS_STAGE2 += -Z orbit=off -endif - ifdef SAVE_TEMPS CFG_RUSTC_FLAGS += -C save-temps endif @@ -306,7 +281,7 @@ endif # LLVM macros ###################################################################### -LLVM_OPTIONAL_COMPONENTS=x86 arm aarch64 mips powerpc pnacl +LLVM_OPTIONAL_COMPONENTS=x86 arm aarch64 mips powerpc pnacl systemz LLVM_REQUIRED_COMPONENTS=ipo bitreader bitwriter linker asmparser mcjit \ interpreter instrumentation @@ -354,6 +329,7 @@ LLVM_AS_$(1)=$$(CFG_LLVM_INST_DIR_$(1))/bin/llvm-as$$(X_$(1)) LLC_$(1)=$$(CFG_LLVM_INST_DIR_$(1))/bin/llc$$(X_$(1)) LLVM_ALL_COMPONENTS_$(1)=$$(shell "$$(LLVM_CONFIG_$(1))" --components) +LLVM_VERSION_$(1)=$$(shell "$$(LLVM_CONFIG_$(1))" --version) endef @@ -392,13 +368,16 @@ CFG_INFO := $(info cfg: disabling unstable features (CFG_DISABLE_UNSTABLE_FEATUR # Turn on feature-staging export CFG_DISABLE_UNSTABLE_FEATURES # Subvert unstable feature lints to do the self-build -export RUSTC_BOOTSTRAP_KEY:=$(CFG_BOOTSTRAP_KEY) +export RUSTC_BOOTSTRAP=1 endif -export CFG_BOOTSTRAP_KEY ifdef CFG_MUSL_ROOT export CFG_MUSL_ROOT endif +# FIXME: Transitionary measure to bootstrap using the old bootstrap logic. +# Remove this once the bootstrap compiler uses the new login in Issue #36548. +export RUSTC_BOOTSTRAP_KEY=5c6cf767 + ###################################################################### # Per-stage targets and runner ###################################################################### @@ -460,7 +439,10 @@ endif TSREQ$(1)_T_$(2)_H_$(3) = \ $$(HSREQ$(1)_H_$(3)) \ $$(foreach obj,$$(REQUIRED_OBJECTS_$(2)),\ - $$(TLIB$(1)_T_$(2)_H_$(3))/$$(obj)) + $$(TLIB$(1)_T_$(2)_H_$(3))/$$(obj)) \ + $$(TLIB0_T_$(2)_H_$(3))/$$(call CFG_STATIC_LIB_NAME_$(2),compiler-rt) +# ^ This copies `libcompiler-rt.a` to the stage0 sysroot +# ^ TODO(stage0) update this to not copy `libcompiler-rt.a` to stage0 # Prerequisites for a working stageN compiler and libraries, for a specific # target @@ -514,10 +496,14 @@ ifeq ($$(OSTYPE_$(3)),apple-darwin) else ifeq ($$(CFG_WINDOWSY_$(3)),1) LD_LIBRARY_PATH_ENV_NAME$(1)_T_$(2)_H_$(3) := PATH +else +ifeq ($$(OSTYPE_$(3)),unknown-haiku) + LD_LIBRARY_PATH_ENV_NAME$(1)_T_$(2)_H_$(3) := LIBRARY_PATH else LD_LIBRARY_PATH_ENV_NAME$(1)_T_$(2)_H_$(3) := LD_LIBRARY_PATH endif endif +endif LD_LIBRARY_PATH_ENV_HOSTDIR$(1)_T_$(2)_H_$(3) := \ $$(CURDIR)/$$(HLIB$(1)_H_$(3)):$$(CFG_LLVM_INST_DIR_$(3))/lib @@ -635,7 +621,8 @@ ALL_TARGET_RULES = $(foreach target,$(CFG_TARGET), \ $(foreach host,$(CFG_HOST), \ all-target-$(target)-host-$(host))) -all: $(ALL_TARGET_RULES) $(GENERATED) docs +all-no-docs: $(ALL_TARGET_RULES) $(GENERATED) +all: all-no-docs docs ###################################################################### # Build system documentation diff --git a/mk/platform.mk b/mk/platform.mk index d601cab722..6a7a20cbfd 100644 --- a/mk/platform.mk +++ b/mk/platform.mk @@ -102,8 +102,6 @@ include $(wildcard $(CFG_SRC_DIR)mk/cfg/*.mk) define ADD_INSTALLED_OBJECTS INSTALLED_OBJECTS_$(1) += $$(CFG_INSTALLED_OBJECTS_$(1)) REQUIRED_OBJECTS_$(1) += $$(CFG_THIRD_PARTY_OBJECTS_$(1)) - INSTALLED_OBJECTS_$(1) += $$(call CFG_STATIC_LIB_NAME_$(1),compiler-rt) - REQUIRED_OBJECTS_$(1) += $$(call CFG_STATIC_LIB_NAME_$(1),compiler-rt) endef $(foreach target,$(CFG_TARGET), \ diff --git a/mk/rt.mk b/mk/rt.mk index 
e86aec6089..a67bded288 100644 --- a/mk/rt.mk +++ b/mk/rt.mk @@ -37,6 +37,16 @@ ################################################################################ NATIVE_LIBS := hoedown miniz rust_test_helpers +# A macro to add a generic implementation of intrinsics iff a arch optimized implementation is not +# already in the list. +# $(1) is the target +# $(2) is the intrinsic +define ADD_INTRINSIC + ifeq ($$(findstring X,$$(foreach intrinsic,$$(COMPRT_OBJS_$(1)),$$(if $$(findstring $(2),$$(intrinsic)),X,))),) + COMPRT_OBJS_$(1) += $(2) + endif +endef + # $(1) is the target triple define NATIVE_LIBRARIES @@ -230,167 +240,15 @@ COMPRT_NAME_$(1) := $$(call CFG_STATIC_LIB_NAME_$(1),compiler-rt) COMPRT_LIB_$(1) := $$(RT_OUTPUT_DIR_$(1))/$$(COMPRT_NAME_$(1)) COMPRT_BUILD_DIR_$(1) := $$(RT_OUTPUT_DIR_$(1))/compiler-rt -# GENERIC_SOURCES in CMakeLists.txt -COMPRT_OBJS_$(1) := \ - absvdi2.o \ - absvsi2.o \ - adddf3.o \ - addsf3.o \ - addvdi3.o \ - addvsi3.o \ - apple_versioning.o \ - ashldi3.o \ - ashrdi3.o \ - clear_cache.o \ - clzdi2.o \ - clzsi2.o \ - cmpdi2.o \ - comparedf2.o \ - comparesf2.o \ - ctzdi2.o \ - ctzsi2.o \ - divdc3.o \ - divdf3.o \ - divdi3.o \ - divmoddi4.o \ - divmodsi4.o \ - divsc3.o \ - divsf3.o \ - divsi3.o \ - divxc3.o \ - extendsfdf2.o \ - extendhfsf2.o \ - ffsdi2.o \ - fixdfdi.o \ - fixdfsi.o \ - fixsfdi.o \ - fixsfsi.o \ - fixunsdfdi.o \ - fixunsdfsi.o \ - fixunssfdi.o \ - fixunssfsi.o \ - fixunsxfdi.o \ - fixunsxfsi.o \ - fixxfdi.o \ - floatdidf.o \ - floatdisf.o \ - floatdixf.o \ - floatsidf.o \ - floatsisf.o \ - floatundidf.o \ - floatundisf.o \ - floatundixf.o \ - floatunsidf.o \ - floatunsisf.o \ - int_util.o \ - lshrdi3.o \ - moddi3.o \ - modsi3.o \ - muldc3.o \ - muldf3.o \ - muldi3.o \ - mulodi4.o \ - mulosi4.o \ - muloti4.o \ - mulsc3.o \ - mulsf3.o \ - mulvdi3.o \ - mulvsi3.o \ - mulxc3.o \ - negdf2.o \ - negdi2.o \ - negsf2.o \ - negvdi2.o \ - negvsi2.o \ - paritydi2.o \ - paritysi2.o \ - popcountdi2.o \ - popcountsi2.o \ - powidf2.o \ - powisf2.o \ - powixf2.o \ - subdf3.o \ - subsf3.o \ - subvdi3.o \ - subvsi3.o \ - truncdfhf2.o \ - truncdfsf2.o \ - truncsfhf2.o \ - ucmpdi2.o \ - udivdi3.o \ - udivmoddi4.o \ - udivmodsi4.o \ - udivsi3.o \ - umoddi3.o \ - umodsi3.o - -ifeq ($$(findstring ios,$(1)),) -COMPRT_OBJS_$(1) += \ - absvti2.o \ - addtf3.o \ - addvti3.o \ - ashlti3.o \ - ashrti3.o \ - clzti2.o \ - cmpti2.o \ - ctzti2.o \ - divtf3.o \ - divti3.o \ - ffsti2.o \ - fixdfti.o \ - fixsfti.o \ - fixunsdfti.o \ - fixunssfti.o \ - fixunsxfti.o \ - fixxfti.o \ - floattidf.o \ - floattisf.o \ - floattixf.o \ - floatuntidf.o \ - floatuntisf.o \ - floatuntixf.o \ - lshrti3.o \ - modti3.o \ - multf3.o \ - multi3.o \ - mulvti3.o \ - negti2.o \ - negvti2.o \ - parityti2.o \ - popcountti2.o \ - powitf2.o \ - subtf3.o \ - subvti3.o \ - trampoline_setup.o \ - ucmpti2.o \ - udivmodti4.o \ - udivti3.o \ - umodti3.o -endif - -ifeq ($$(findstring apple,$(1)),apple) -COMPRT_OBJS_$(1) += \ - atomic_flag_clear.o \ - atomic_flag_clear_explicit.o \ - atomic_flag_test_and_set.o \ - atomic_flag_test_and_set_explicit.o \ - atomic_signal_fence.o \ - atomic_thread_fence.o -endif - +# We must avoid compiling both a generic implementation (e.g. `floatdidf.c) and an arch optimized +# implementation (e.g. `x86_64/floatdidf.S) of the same symbol (e.g. `floatdidf) because that causes +# linker errors. To avoid that, we first add all the arch optimized implementations and then add the +# generic implementations if and only if its arch optimized version is not already in the list. 
This +# last part is handled by the ADD_INTRINSIC macro. -ifeq ($$(findstring windows,$(1)),) -COMPRT_OBJS_$(1) += emutls.o -endif +COMPRT_OBJS_$(1) := ifeq ($$(findstring msvc,$(1)),) - -ifeq ($$(findstring freebsd,$(1)),) -COMPRT_OBJS_$(1) += gcc_personality_v0.o -endif - -COMPRT_OBJS_$(1) += emutls.o - ifeq ($$(findstring x86_64,$(1)),x86_64) COMPRT_OBJS_$(1) += \ x86_64/chkstk.o \ @@ -540,9 +398,166 @@ COMPRT_OBJS_$(1) += \ arm/unordsf2vfp.o endif +$(foreach intrinsic,absvdi2.o \ + absvsi2.o \ + adddf3.o \ + addsf3.o \ + addvdi3.o \ + addvsi3.o \ + apple_versioning.o \ + ashldi3.o \ + ashrdi3.o \ + clear_cache.o \ + clzdi2.o \ + clzsi2.o \ + cmpdi2.o \ + comparedf2.o \ + comparesf2.o \ + ctzdi2.o \ + ctzsi2.o \ + divdc3.o \ + divdf3.o \ + divdi3.o \ + divmoddi4.o \ + divmodsi4.o \ + divsc3.o \ + divsf3.o \ + divsi3.o \ + divxc3.o \ + extendsfdf2.o \ + extendhfsf2.o \ + ffsdi2.o \ + fixdfdi.o \ + fixdfsi.o \ + fixsfdi.o \ + fixsfsi.o \ + fixunsdfdi.o \ + fixunsdfsi.o \ + fixunssfdi.o \ + fixunssfsi.o \ + fixunsxfdi.o \ + fixunsxfsi.o \ + fixxfdi.o \ + floatdidf.o \ + floatdisf.o \ + floatdixf.o \ + floatsidf.o \ + floatsisf.o \ + floatundidf.o \ + floatundisf.o \ + floatundixf.o \ + floatunsidf.o \ + floatunsisf.o \ + int_util.o \ + lshrdi3.o \ + moddi3.o \ + modsi3.o \ + muldc3.o \ + muldf3.o \ + muldi3.o \ + mulodi4.o \ + mulosi4.o \ + muloti4.o \ + mulsc3.o \ + mulsf3.o \ + mulvdi3.o \ + mulvsi3.o \ + mulxc3.o \ + negdf2.o \ + negdi2.o \ + negsf2.o \ + negvdi2.o \ + negvsi2.o \ + paritydi2.o \ + paritysi2.o \ + popcountdi2.o \ + popcountsi2.o \ + powidf2.o \ + powisf2.o \ + powixf2.o \ + subdf3.o \ + subsf3.o \ + subvdi3.o \ + subvsi3.o \ + truncdfhf2.o \ + truncdfsf2.o \ + truncsfhf2.o \ + ucmpdi2.o \ + udivdi3.o \ + udivmoddi4.o \ + udivmodsi4.o \ + udivsi3.o \ + umoddi3.o \ + umodsi3.o, + $(call ADD_INTRINSIC,$(1),$(intrinsic))) + +ifeq ($$(findstring ios,$(1)),) +$(foreach intrinsic,absvti2.o \ + addtf3.o \ + addvti3.o \ + ashlti3.o \ + ashrti3.o \ + clzti2.o \ + cmpti2.o \ + ctzti2.o \ + divtf3.o \ + divti3.o \ + ffsti2.o \ + fixdfti.o \ + fixsfti.o \ + fixunsdfti.o \ + fixunssfti.o \ + fixunsxfti.o \ + fixxfti.o \ + floattidf.o \ + floattisf.o \ + floattixf.o \ + floatuntidf.o \ + floatuntisf.o \ + floatuntixf.o \ + lshrti3.o \ + modti3.o \ + multf3.o \ + multi3.o \ + mulvti3.o \ + negti2.o \ + negvti2.o \ + parityti2.o \ + popcountti2.o \ + powitf2.o \ + subtf3.o \ + subvti3.o \ + trampoline_setup.o \ + ucmpti2.o \ + udivmodti4.o \ + udivti3.o \ + umodti3.o, + $(call ADD_INTRINSIC,$(1),$(intrinsic))) +endif + +ifeq ($$(findstring apple,$(1)),apple) +$(foreach intrinsic,atomic_flag_clear.o \ + atomic_flag_clear_explicit.o \ + atomic_flag_test_and_set.o \ + atomic_flag_test_and_set_explicit.o \ + atomic_signal_fence.o \ + atomic_thread_fence.o, + $(call ADD_INTRINSIC,$(1),$(intrinsic))) +endif + +ifeq ($$(findstring windows,$(1)),) +$(call ADD_INTRINSIC,$(1),emutls.o) +endif + +ifeq ($$(findstring msvc,$(1)),) + +ifeq ($$(findstring freebsd,$(1)),) +$(call ADD_INTRINSIC,$(1),gcc_personality_v0.o) +endif +endif + ifeq ($$(findstring aarch64,$(1)),aarch64) -COMPRT_OBJS_$(1) += \ - comparetf2.o \ +$(foreach intrinsic,comparetf2.o \ extenddftf2.o \ extendsftf2.o \ fixtfdi.o \ @@ -557,7 +572,8 @@ COMPRT_OBJS_$(1) += \ floatunsitf.o \ multc3.o \ trunctfdf2.o \ - trunctfsf2.o + trunctfsf2.o, + $(call ADD_INTRINSIC,$(1),$(intrinsic))) endif ifeq ($$(findstring msvc,$(1)),msvc) diff --git a/mk/target.mk b/mk/target.mk index 2a08b7b046..1b139909ab 100644 --- a/mk/target.mk +++ 
b/mk/target.mk @@ -42,23 +42,6 @@ $(foreach host,$(CFG_HOST), \ $(foreach crate,$(CRATES), \ $(eval $(call RUST_CRATE_FULLDEPS,$(stage),$(target),$(host),$(crate))))))) -# $(1) stage -# $(2) target -# $(3) host -define DEFINE_BOOTSTRAP_KEY -BOOTSTRAP_KEY$(1)_T_$(2)_H_$(3) := $$(CFG_BOOTSTRAP_KEY) -ifeq ($(1),0) -ifeq ($(3),$$(CFG_BUILD)) -BOOTSTRAP_KEY$(1)_T_$(2)_H_$(3) := $$(CFG_BOOTSTRAP_KEY_STAGE0) -endif -endif -endef - -$(foreach host,$(CFG_TARGET), \ - $(foreach target,$(CFG_TARGET), \ - $(foreach stage,$(STAGES), \ - $(eval $(call DEFINE_BOOTSTRAP_KEY,$(stage),$(target),$(host)))))) - # RUST_TARGET_STAGE_N template: This defines how target artifacts are built # for all stage/target architecture combinations. This is one giant rule which # works as follows: @@ -83,8 +66,6 @@ $(foreach host,$(CFG_TARGET), \ define RUST_TARGET_STAGE_N $$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$(4): CFG_COMPILER_HOST_TRIPLE = $(2) -$$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$(4): \ - export RUSTC_BOOTSTRAP_KEY := $$(BOOTSTRAP_KEY$(1)_T_$(2)_H_$(3)) $$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$(4): \ $$(CRATEFILE_$(4)) \ $$(CRATE_FULLDEPS_$(1)_T_$(2)_H_$(3)_$(4)) \ @@ -132,8 +113,6 @@ endef # $(4) - name of the tool being built define TARGET_TOOL -$$(TBIN$(1)_T_$(2)_H_$(3))/$(4)$$(X_$(2)): \ - export RUSTC_BOOTSTRAP_KEY := $$(BOOTSTRAP_KEY$(1)_T_$(2)_H_$(3)) $$(TBIN$(1)_T_$(2)_H_$(3))/$(4)$$(X_$(2)): \ $$(TOOL_SOURCE_$(4)) \ $$(TOOL_INPUTS_$(4)) \ diff --git a/mk/tests.mk b/mk/tests.mk index 201e4cae51..fc1f4b5561 100644 --- a/mk/tests.mk +++ b/mk/tests.mk @@ -27,7 +27,7 @@ TEST_TARGET_CRATES = $(filter-out core rustc_unicode alloc_system libc \ panic_abort,$(TARGET_CRATES)) \ collectionstest coretest TEST_DOC_CRATES = $(DOC_CRATES) arena flate fmt_macros getopts graphviz \ - log rand rbml serialize syntax term test + log rand serialize syntax term test TEST_HOST_CRATES = $(filter-out rustc_typeck rustc_borrowck rustc_resolve \ rustc_trans rustc_lint,\ $(HOST_CRATES)) @@ -649,6 +649,7 @@ CTEST_COMMON_ARGS$(1)-T-$(2)-H-$(3) = \ --lldb-python $$(CFG_LLDB_PYTHON) \ --gdb-version="$(CFG_GDB_VERSION)" \ --lldb-version="$(CFG_LLDB_VERSION)" \ + --llvm-version="$$(LLVM_VERSION_$(3))" \ --android-cross-path=$(CFG_ARM_LINUX_ANDROIDEABI_NDK) \ --adb-path=$(CFG_ADB) \ --adb-test-dir=$(CFG_ADB_TEST_DIR) \ diff --git a/src/bootstrap/Cargo.lock b/src/bootstrap/Cargo.lock index d52577eb22..36b94e4ebe 100644 --- a/src/bootstrap/Cargo.lock +++ b/src/bootstrap/Cargo.lock @@ -157,3 +157,24 @@ name = "winapi-build" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[metadata] +"checksum aho-corasick 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2b3fb52b09c1710b961acb35390d514be82e4ac96a9969a8e38565a29b878dc9" +"checksum cmake 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "dfcf5bcece56ef953b8ea042509e9dcbdfe97820b7e20d86beb53df30ed94978" +"checksum filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "5363ab8e4139b8568a6237db5248646e5a8a2f89bd5ccb02092182b11fd3e922" +"checksum gcc 0.3.31 (git+https://github.com/alexcrichton/gcc-rs)" = "" +"checksum gcc 0.3.31 (registry+https://github.com/rust-lang/crates.io-index)" = "cfe877476e53690ebb0ce7325d0bf43e198d9500291b54b3c65e518de5039b07" +"checksum getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9047cfbd08a437050b363d35ef160452c5fe8ea5187ae0a624708c91581d685" +"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +"checksum libc 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "55f3730be7e803cf350d32061958171731c2395831fbd67a61083782808183e0" +"checksum md5 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a5539a8dee9b4ae308c9c406a379838b435a8f2c84cf9fedc6d5a576be9888db" +"checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20" +"checksum num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "51fedae97a05f7353612fe017ab705a37e6db8f4d67c5c6fe739a9e70d6eed09" +"checksum regex 0.1.73 (registry+https://github.com/rust-lang/crates.io-index)" = "56b7ee9f764ecf412c6e2fff779bca4b22980517ae335a21aeaf4e32625a5df2" +"checksum regex-syntax 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "31040aad7470ad9d8c46302dcffba337bb4289ca5da2e3cd6e37b64109a85199" +"checksum rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)" = "6159e4e6e559c81bd706afe9c8fd68f547d3e851ce12e76b1de7914bab61691b" +"checksum thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9539db560102d1cef46b8b78ce737ff0bb64e7e18d35b2a5688f7d097d0ff03" +"checksum thread_local 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "55dd963dbaeadc08aa7266bf7f91c3154a7805e32bb94b820b769d2ef3b4744d" +"checksum toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "fcd27a04ca509aff336ba5eb2abc58d456f52c4ff64d9724d88acb85ead560b6" +"checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f" +"checksum winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "4dfaaa8fbdaa618fa6914b59b2769d690dd7521920a18d84b42d254678dd5fd4" +"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" diff --git a/src/bootstrap/bin/rustc.rs b/src/bootstrap/bin/rustc.rs index c64cbb9a74..a70a15b383 100644 --- a/src/bootstrap/bin/rustc.rs +++ b/src/bootstrap/bin/rustc.rs @@ -38,21 +38,27 @@ fn main() { // is passed (a bit janky...) let target = args.windows(2).find(|w| &*w[0] == "--target") .and_then(|w| w[1].to_str()); + let version = args.iter().find(|w| &**w == "-vV"); // Build scripts always use the snapshot compiler which is guaranteed to be // able to produce an executable, whereas intermediate compilers may not // have the standard library built yet and may not be able to produce an // executable. Otherwise we just use the standard compiler we're // bootstrapping with. - let (rustc, libdir) = if target.is_none() { + // + // Also note that cargo will detect the version of the compiler to trigger + // a rebuild when the compiler changes. If this happens, we want to make + // sure to use the actual compiler instead of the snapshot compiler becase + // that's the one that's actually changing. 
+ let (rustc, libdir) = if target.is_none() && version.is_none() { ("RUSTC_SNAPSHOT", "RUSTC_SNAPSHOT_LIBDIR") } else { ("RUSTC_REAL", "RUSTC_LIBDIR") }; - let stage = env::var("RUSTC_STAGE").unwrap(); + let stage = env::var("RUSTC_STAGE").expect("RUSTC_STAGE was not set"); - let rustc = env::var_os(rustc).unwrap(); - let libdir = env::var_os(libdir).unwrap(); + let rustc = env::var_os(rustc).unwrap_or_else(|| panic!("{:?} was not set", rustc)); + let libdir = env::var_os(libdir).unwrap_or_else(|| panic!("{:?} was not set", libdir)); let mut dylib_path = bootstrap::util::dylib_path(); dylib_path.insert(0, PathBuf::from(libdir)); @@ -65,7 +71,7 @@ fn main() { if let Some(target) = target { // The stage0 compiler has a special sysroot distinct from what we // actually downloaded, so we just always pass the `--sysroot` option. - cmd.arg("--sysroot").arg(env::var_os("RUSTC_SYSROOT").unwrap()); + cmd.arg("--sysroot").arg(env::var_os("RUSTC_SYSROOT").expect("RUSTC_SYSROOT was not set")); // When we build Rust dylibs they're all intended for intermediate // usage, so make sure we pass the -Cprefer-dynamic flag instead of diff --git a/src/bootstrap/bin/rustdoc.rs b/src/bootstrap/bin/rustdoc.rs index 79629bfd71..658ff358d6 100644 --- a/src/bootstrap/bin/rustdoc.rs +++ b/src/bootstrap/bin/rustdoc.rs @@ -20,15 +20,16 @@ use std::path::PathBuf; fn main() { let args = env::args_os().skip(1).collect::>(); - let rustdoc = env::var_os("RUSTDOC_REAL").unwrap(); - let libdir = env::var_os("RUSTC_LIBDIR").unwrap(); + let rustdoc = env::var_os("RUSTDOC_REAL").expect("RUSTDOC_REAL was not set"); + let libdir = env::var_os("RUSTC_LIBDIR").expect("RUSTC_LIBDIR was not set"); + let stage = env::var("RUSTC_STAGE").expect("RUSTC_STAGE was not set"); let mut dylib_path = bootstrap::util::dylib_path(); dylib_path.insert(0, PathBuf::from(libdir)); let mut cmd = Command::new(rustdoc); cmd.args(&args) - .arg("--cfg").arg(format!("stage{}", env::var("RUSTC_STAGE").unwrap())) + .arg("--cfg").arg(format!("stage{}", stage)) .arg("--cfg").arg("dox") .env(bootstrap::util::dylib_path_var(), env::join_paths(&dylib_path).unwrap()); @@ -37,4 +38,3 @@ fn main() { Err(e) => panic!("\n\nfailed to run {:?}: {}\n\n", cmd, e), }) } - diff --git a/src/bootstrap/bootstrap.py b/src/bootstrap/bootstrap.py index 17a7c9ca66..2c2260a8e6 100644 --- a/src/bootstrap/bootstrap.py +++ b/src/bootstrap/bootstrap.py @@ -131,7 +131,8 @@ def stage0_data(rust_root): def format_build_time(duration): return str(datetime.timedelta(seconds=int(duration))) -class RustBuild: + +class RustBuild(object): def download_stage0(self): cache_dst = os.path.join(self.build_dir, "cache") rustc_cache = os.path.join(cache_dst, self.stage0_rustc_date()) @@ -142,7 +143,7 @@ class RustBuild: os.makedirs(cargo_cache) if self.rustc().startswith(self.bin_root()) and \ - (not os.path.exists(self.rustc()) or self.rustc_out_of_date()): + (not os.path.exists(self.rustc()) or self.rustc_out_of_date()): if os.path.exists(self.bin_root()): shutil.rmtree(self.bin_root()) channel = self.stage0_rustc_channel() @@ -165,7 +166,7 @@ class RustBuild: f.write(self.stage0_rustc_date()) if self.cargo().startswith(self.bin_root()) and \ - (not os.path.exists(self.cargo()) or self.cargo_out_of_date()): + (not os.path.exists(self.cargo()) or self.cargo_out_of_date()): channel = self.stage0_cargo_channel() filename = "cargo-{}-{}.tar.gz".format(channel, self.build) url = "https://static.rust-lang.org/cargo-dist/" + self.stage0_cargo_date() @@ -238,8 +239,8 @@ class RustBuild: def get_string(self, 
line): start = line.find('"') - end = start + 1 + line[start+1:].find('"') - return line[start+1:end] + end = start + 1 + line[start + 1:].find('"') + return line[start + 1:end] def exe_suffix(self): if sys.platform == 'win32': @@ -269,6 +270,7 @@ class RustBuild: sys.exit(ret) def build_triple(self): + default_encoding = sys.getdefaultencoding() config = self.get_toml('build') if config: return config @@ -276,8 +278,8 @@ class RustBuild: if config: return config try: - ostype = subprocess.check_output(['uname', '-s']).strip() - cputype = subprocess.check_output(['uname', '-m']).strip() + ostype = subprocess.check_output(['uname', '-s']).strip().decode(default_encoding) + cputype = subprocess.check_output(['uname', '-m']).strip().decode(default_encoding) except (subprocess.CalledProcessError, WindowsError): if sys.platform == 'win32': return 'x86_64-pc-windows-msvc' @@ -289,7 +291,8 @@ class RustBuild: # Darwin's `uname -s` lies and always returns i386. We have to use # sysctl instead. if ostype == 'Darwin' and cputype == 'i686': - sysctl = subprocess.check_output(['sysctl', 'hw.optional.x86_64']) + args = ['sysctl', 'hw.optional.x86_64'] + sysctl = subprocess.check_output(args).decode(default_encoding) if ': 1' in sysctl: cputype = 'x86_64' diff --git a/src/bootstrap/check.rs b/src/bootstrap/check.rs index 3d8b143812..603a5ce752 100644 --- a/src/bootstrap/check.rs +++ b/src/bootstrap/check.rs @@ -148,6 +148,9 @@ pub fn compiletest(build: &Build, if let Some(ref dir) = build.lldb_python_dir { cmd.arg("--lldb-python-dir").arg(dir); } + let llvm_config = build.llvm_config(target); + let llvm_version = output(Command::new(&llvm_config).arg("--version")); + cmd.arg("--llvm-version").arg(llvm_version); cmd.args(&build.flags.args); @@ -158,7 +161,6 @@ pub fn compiletest(build: &Build, // Only pass correct values for these flags for the `run-make` suite as it // requires that a C++ compiler was configured which isn't always the case. if suite == "run-make" { - let llvm_config = build.llvm_config(target); let llvm_components = output(Command::new(&llvm_config).arg("--components")); let llvm_cxxflags = output(Command::new(&llvm_config).arg("--cxxflags")); cmd.arg("--cc").arg(build.cc(target)) @@ -183,7 +185,7 @@ pub fn compiletest(build: &Build, } } } - build.add_bootstrap_key(compiler, &mut cmd); + build.add_bootstrap_key(&mut cmd); cmd.arg("--adb-path").arg("adb"); cmd.arg("--adb-test-dir").arg(ADB_TEST_DIR); diff --git a/src/bootstrap/clean.rs b/src/bootstrap/clean.rs index a466e2e689..a1e286e162 100644 --- a/src/bootstrap/clean.rs +++ b/src/bootstrap/clean.rs @@ -28,7 +28,6 @@ pub fn clean(build: &Build) { let out = build.out.join(host); - rm_rf(build, &out.join("compiler-rt")); rm_rf(build, &out.join("doc")); for stage in 0..4 { diff --git a/src/bootstrap/compile.rs b/src/bootstrap/compile.rs index 155848901c..239db55ff5 100644 --- a/src/bootstrap/compile.rs +++ b/src/bootstrap/compile.rs @@ -16,12 +16,14 @@ //! compiler. This module is also responsible for assembling the sysroot as it //! goes along from the output of the previous stage. 
+use std::cmp; use std::collections::HashMap; -use std::fs; +use std::fs::{self, File}; use std::path::{Path, PathBuf}; use std::process::Command; use build_helper::output; +use filetime::FileTime; use util::{exe, staticlib, libdir, mtime, is_dylib, copy}; use {Build, Compiler, Mode}; @@ -35,13 +37,23 @@ pub fn std<'a>(build: &'a Build, target: &str, compiler: &Compiler<'a>) { println!("Building stage{} std artifacts ({} -> {})", compiler.stage, compiler.host, target); - // Move compiler-rt into place as it'll be required by the compiler when - // building the standard library to link the dylib of libstd let libdir = build.sysroot_libdir(compiler, target); let _ = fs::remove_dir_all(&libdir); t!(fs::create_dir_all(&libdir)); - copy(&build.compiler_rt_built.borrow()[target], - &libdir.join(staticlib("compiler-rt", target))); + // FIXME(stage0) remove this `if` after the next snapshot + // The stage0 compiler still passes the `-lcompiler-rt` flag to the linker but now `bootstrap` + // never builds a `libcompiler-rt.a`! We'll fill the hole by simply copying stage0's + // `libcompiler-rt.a` to where the stage1's one is expected (though we could as well just use + // an empty `.a` archive). Note that the symbols of that stage0 `libcompiler-rt.a` won't make + // it to the final binary because now `libcore.rlib` also contains the symbols that + // `libcompiler-rt.a` provides. Since that rlib appears first in the linker arguments, its + // symbols are used instead of `libcompiler-rt.a`'s. + if compiler.stage == 0 { + let rtlib = &staticlib("compiler-rt", target); + let src = build.rustc.parent().unwrap().parent().unwrap().join("lib").join("rustlib") + .join(target).join("lib").join(rtlib); + copy(&src, &libdir.join(rtlib)); + } // Some platforms have startup objects that may be required to produce the // libstd dynamic library, for example. @@ -59,13 +71,14 @@ pub fn std<'a>(build: &'a Build, target: &str, compiler: &Compiler<'a>) { cargo.env("JEMALLOC_OVERRIDE", jemalloc); } } - if let Some(ref p) = build.config.musl_root { - if target.contains("musl") { + if target.contains("musl") { + if let Some(p) = build.musl_root(target) { cargo.env("MUSL_ROOT", p); } } build.run(&mut cargo); + update_mtime(&libstd_stamp(build, compiler, target)); std_link(build, target, compiler, compiler.host); } @@ -83,26 +96,24 @@ pub fn std_link(build: &Build, // If we're linking one compiler host's output into another, then we weren't // called from the `std` method above. In that case we clean out what's - // already there and then also link compiler-rt into place. + // already there.
if host != compiler.host { let _ = fs::remove_dir_all(&libdir); t!(fs::create_dir_all(&libdir)); - copy(&build.compiler_rt_built.borrow()[target], - &libdir.join(staticlib("compiler-rt", target))); } add_to_sysroot(&out_dir, &libdir); if target.contains("musl") && !target.contains("mips") { - copy_third_party_objects(build, target, &libdir); + copy_musl_third_party_objects(build, &libdir); } } /// Copies the crt(1,i,n).o startup objects /// /// Only required for musl targets that statically link to libc -fn copy_third_party_objects(build: &Build, target: &str, into: &Path) { +fn copy_musl_third_party_objects(build: &Build, into: &Path) { for &obj in &["crt1.o", "crti.o", "crtn.o"] { - copy(&compiler_file(build.cc(target), obj), &into.join(obj)); + copy(&build.config.musl_root.as_ref().unwrap().join("lib").join(obj), &into.join(obj)); } } @@ -117,14 +128,16 @@ fn build_startup_objects(build: &Build, target: &str, into: &Path) { return } let compiler = Compiler::new(0, &build.config.build); - let compiler = build.compiler_path(&compiler); + let compiler_path = build.compiler_path(&compiler); for file in t!(fs::read_dir(build.src.join("src/rtstartup"))) { let file = t!(file); - build.run(Command::new(&compiler) - .arg("--emit=obj") - .arg("--out-dir").arg(into) - .arg(file.path())); + let mut cmd = Command::new(&compiler_path); + build.add_bootstrap_key(&mut cmd); + build.run(cmd.arg("--target").arg(target) + .arg("--emit=obj") + .arg("--out-dir").arg(into) + .arg(file.path())); } for obj in ["crt2.o", "dllcrt2.o"].iter() { @@ -141,11 +154,12 @@ pub fn test<'a>(build: &'a Build, target: &str, compiler: &Compiler<'a>) { println!("Building stage{} test artifacts ({} -> {})", compiler.stage, compiler.host, target); let out_dir = build.cargo_out(compiler, Mode::Libtest, target); - build.clear_if_dirty(&out_dir, &libstd_shim(build, compiler, target)); + build.clear_if_dirty(&out_dir, &libstd_stamp(build, compiler, target)); let mut cargo = build.cargo(compiler, Mode::Libtest, target, "build"); cargo.arg("--manifest-path") .arg(build.src.join("src/rustc/test_shim/Cargo.toml")); build.run(&mut cargo); + update_mtime(&libtest_stamp(build, compiler, target)); test_link(build, target, compiler, compiler.host); } @@ -173,7 +187,7 @@ pub fn rustc<'a>(build: &'a Build, target: &str, compiler: &Compiler<'a>) { compiler.stage, compiler.host, target); let out_dir = build.cargo_out(compiler, Mode::Librustc, target); - build.clear_if_dirty(&out_dir, &libtest_shim(build, compiler, target)); + build.clear_if_dirty(&out_dir, &libtest_stamp(build, compiler, target)); let mut cargo = build.cargo(compiler, Mode::Librustc, target, "build"); cargo.arg("--features").arg(build.rustc_features()) @@ -185,7 +199,6 @@ pub fn rustc<'a>(build: &'a Build, target: &str, compiler: &Compiler<'a>) { cargo.env("CFG_RELEASE", &build.release) .env("CFG_RELEASE_CHANNEL", &build.config.channel) .env("CFG_VERSION", &build.version) - .env("CFG_BOOTSTRAP_KEY", &build.bootstrap_key) .env("CFG_PREFIX", build.config.prefix.clone().unwrap_or(String::new())) .env("CFG_LIBDIR_RELATIVE", "lib"); @@ -203,6 +216,10 @@ pub fn rustc<'a>(build: &'a Build, target: &str, compiler: &Compiler<'a>) { cargo.env("LLVM_RUSTLLVM", "1"); } cargo.env("LLVM_CONFIG", build.llvm_config(target)); + let target_config = build.config.target_config.get(target); + if let Some(s) = target_config.and_then(|c| c.llvm_config.as_ref()) { + cargo.env("CFG_LLVM_ROOT", s); + } if build.config.llvm_static_stdcpp { cargo.env("LLVM_STATIC_STDCPP", compiler_file(build.cxx(target), 
"libstdc++.a")); @@ -234,14 +251,14 @@ pub fn rustc_link(build: &Build, /// Cargo's output path for the standard library in a given stage, compiled /// by a particular compiler for the specified target. -fn libstd_shim(build: &Build, compiler: &Compiler, target: &str) -> PathBuf { - build.cargo_out(compiler, Mode::Libstd, target).join("libstd_shim.rlib") +fn libstd_stamp(build: &Build, compiler: &Compiler, target: &str) -> PathBuf { + build.cargo_out(compiler, Mode::Libstd, target).join(".libstd.stamp") } /// Cargo's output path for libtest in a given stage, compiled by a particular /// compiler for the specified target. -fn libtest_shim(build: &Build, compiler: &Compiler, target: &str) -> PathBuf { - build.cargo_out(compiler, Mode::Libtest, target).join("libtest_shim.rlib") +fn libtest_stamp(build: &Build, compiler: &Compiler, target: &str) -> PathBuf { + build.cargo_out(compiler, Mode::Libtest, target).join(".libtest.stamp") } fn compiler_file(compiler: &Path, file: &str) -> PathBuf { @@ -354,10 +371,35 @@ pub fn tool(build: &Build, stage: u32, host: &str, tool: &str) { // Maybe when libstd is compiled it should clear out the rustc of the // corresponding stage? // let out_dir = build.cargo_out(stage, &host, Mode::Librustc, target); - // build.clear_if_dirty(&out_dir, &libstd_shim(build, stage, &host, target)); + // build.clear_if_dirty(&out_dir, &libstd_stamp(build, stage, &host, target)); let mut cargo = build.cargo(&compiler, Mode::Tool, host, "build"); cargo.arg("--manifest-path") .arg(build.src.join(format!("src/tools/{}/Cargo.toml", tool))); build.run(&mut cargo); } + +/// Updates the mtime of a stamp file if necessary, only changing it if it's +/// older than some other file in the same directory. +/// +/// We don't know what file Cargo is going to output (because there's a hash in +/// the file name) but we know where it's going to put it. We use this helper to +/// detect changes to that output file by looking at the modification time for +/// all files in a directory and updating the stamp if any are newer. +fn update_mtime(path: &Path) { + let mut max = None; + if let Ok(entries) = path.parent().unwrap().read_dir() { + for entry in entries.map(|e| t!(e)) { + if t!(entry.file_type()).is_file() { + let meta = t!(entry.metadata()); + let time = FileTime::from_last_modification_time(&meta); + max = cmp::max(max, Some(time)); + } + } + } + + if !max.is_none() && max <= Some(mtime(path)) { + return + } + t!(File::create(path)); +} diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index aafbf68d1b..0f69bcfbb6 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -76,8 +76,11 @@ pub struct Config { // misc pub channel: String, + // Fallback musl-root for all targets pub musl_root: Option, pub prefix: Option, + pub codegen_tests: bool, + pub nodejs: Option, } /// Per-target configuration stored in the global configuration structure. @@ -88,6 +91,7 @@ pub struct Target { pub cc: Option, pub cxx: Option, pub ndk: Option, + pub musl_root: Option, } /// Structure of the `config.toml` file that configuration is read from. @@ -143,6 +147,7 @@ struct Rust { rpath: Option, optimize_tests: Option, debuginfo_tests: Option, + codegen_tests: Option, } /// TOML representation of how each build target is configured. 
@@ -169,6 +174,7 @@ impl Config { config.rust_codegen_units = 1; config.build = build.to_string(); config.channel = "dev".to_string(); + config.codegen_tests = true; let toml = file.map(|file| { let mut f = t!(File::open(&file)); @@ -230,6 +236,7 @@ impl Config { set(&mut config.rust_optimize, rust.optimize); set(&mut config.rust_optimize_tests, rust.optimize_tests); set(&mut config.rust_debuginfo_tests, rust.debuginfo_tests); + set(&mut config.codegen_tests, rust.codegen_tests); set(&mut config.rust_rpath, rust.rpath); set(&mut config.debug_jemalloc, rust.debug_jemalloc); set(&mut config.use_jemalloc, rust.use_jemalloc); @@ -322,6 +329,7 @@ impl Config { ("DEBUGINFO_TESTS", self.rust_debuginfo_tests), ("LOCAL_REBUILD", self.local_rebuild), ("NINJA", self.ninja), + ("CODEGEN_TESTS", self.codegen_tests), } match key { @@ -388,6 +396,9 @@ impl Config { self.rustc = Some(PathBuf::from(value).join("bin/rustc")); self.cargo = Some(PathBuf::from(value).join("bin/cargo")); } + "CFG_NODEJS" if value.len() > 0 => { + self.nodejs = Some(PathBuf::from(value)); + } _ => {} } } diff --git a/src/bootstrap/config.toml.example b/src/bootstrap/config.toml.example index 2894adafef..f054b29d0b 100644 --- a/src/bootstrap/config.toml.example +++ b/src/bootstrap/config.toml.example @@ -1,5 +1,8 @@ # Sample TOML configuration file for building Rust. # +# To configure rustbuild, copy this file to the directory from which you will be +# running the build, and name it config.toml. +# # All options are commented out by default in this file, and they're commented # out with their default values. The build system by default looks for # `config.toml` in the current directory of a build for build configuration, but @@ -115,10 +118,6 @@ # nightly features #channel = "dev" -# The root location of the MUSL installation directory. The library directory -# will also need to contain libunwind.a for an unwinding implementation. -#musl-root = "..." - # By default the `rustc` executable is built with `-Wl,-rpath` flags on Unix # platforms to ensure that the compiler is usable by default from the build # directory (as it links to a number of dynamic libraries). This may not be @@ -130,6 +129,10 @@ #optimize-tests = true #debuginfo-tests = true +# Flag indicating whether codegen tests will be run or not. If you get an error +# saying that the FileCheck executable is missing, you may want to disable this. +#codegen-tests = true + # ============================================================================= # Options for specific targets # @@ -160,3 +163,9 @@ # the NDK for the target lives. This is used to find the C compiler to link and # build native code. #android-ndk = "/path/to/ndk" + +# The root location of the MUSL installation directory. The library directory +# will also need to contain libunwind.a for an unwinding implementation. Note +# that this option only makes sense for MUSL targets that produce statically +# linked binaries +#musl-root = "..." 
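[Editor's note] The configuration changes above move `musl-root` from a single global setting to a per-target `[target.*]` option (with the global value kept as a fallback) and add a `codegen-tests` switch, both consumed through the optional-override pattern visible in the `set(&mut config.codegen_tests, rust.codegen_tests)` calls and in the `musl_root` fallback added to `bootstrap/lib.rs` later in this patch. The following is a minimal, self-contained sketch of that pattern; the struct and field names are simplified stand-ins, not the actual `bootstrap::config` types.

```rust
use std::collections::HashMap;
use std::path::{Path, PathBuf};

// Apply an optional TOML value over a built-in default, mirroring the
// `set(&mut config.codegen_tests, rust.codegen_tests)` calls in the patch.
fn set<T>(field: &mut T, val: Option<T>) {
    if let Some(v) = val {
        *field = v;
    }
}

// Simplified stand-ins for the real config types (names are illustrative).
struct TargetConfig {
    musl_root: Option<PathBuf>, // per-target [target.*] override
}

struct Config {
    codegen_tests: bool,        // `codegen-tests` in [rust], defaults to true
    musl_root: Option<PathBuf>, // global fallback for statically linked MUSL targets
    target_config: HashMap<String, TargetConfig>,
}

impl Config {
    // Per-target musl-root wins; otherwise fall back to the global setting.
    fn musl_root(&self, target: &str) -> Option<&Path> {
        self.target_config.get(target)
            .and_then(|t| t.musl_root.as_ref())
            .or(self.musl_root.as_ref())
            .map(|p| &**p)
    }
}

fn main() {
    let mut config = Config {
        codegen_tests: true,
        musl_root: None,
        target_config: HashMap::new(),
    };
    // A `codegen-tests = false` line in config.toml arrives as `Some(false)`.
    set(&mut config.codegen_tests, Some(false));
    assert!(!config.codegen_tests);
    assert!(config.musl_root("x86_64-unknown-linux-musl").is_none());
}
```

Keeping every TOML field an `Option` means an empty config.toml reproduces exactly the built-in defaults, which is why config.toml.example can ship with every line commented out.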
diff --git a/src/bootstrap/dist.rs b/src/bootstrap/dist.rs index 9d18901eb0..31b7db168b 100644 --- a/src/bootstrap/dist.rs +++ b/src/bootstrap/dist.rs @@ -388,6 +388,9 @@ pub fn rust_src(build: &Build) { // Rename directory, so that root folder of tarball has the correct name t!(fs::rename(&dst_src, &plain_dst_src)); + // Create the version file + write_file(&plain_dst_src.join("version"), build.version.as_bytes()); + // Create plain source tarball let mut cmd = Command::new("tar"); cmd.arg("-czf").arg(sanitize_sh(&distdir(build).join(&format!("{}.tar.gz", plain_name)))) @@ -431,3 +434,8 @@ fn sanitize_sh(path: &Path) -> String { Some(format!("/{}/{}", drive, &s[drive.len_utf8() + 2..])) } } + +fn write_file(path: &Path, data: &[u8]) { + let mut vf = t!(fs::File::create(path)); + t!(vf.write_all(data)); +} diff --git a/src/bootstrap/lib.rs b/src/bootstrap/lib.rs index 0af6082aee..32232cbee9 100644 --- a/src/bootstrap/lib.rs +++ b/src/bootstrap/lib.rs @@ -28,11 +28,10 @@ extern crate rustc_serialize; extern crate toml; extern crate regex; -use std::cell::RefCell; use std::collections::HashMap; use std::env; use std::fs::{self, File}; -use std::path::{PathBuf, Path}; +use std::path::{Component, PathBuf, Path}; use std::process::Command; use build_helper::{run_silent, output}; @@ -46,7 +45,7 @@ use util::{exe, mtime, libdir, add_lib_path}; /// * The error itself /// /// This is currently used judiciously throughout the build system rather than -/// using a `Result` with `try!`, but this may change on day... +/// using a `Result` with `try!`, but this may change one day... macro_rules! t { ($e:expr) => (match $e { Ok(e) => e, @@ -131,7 +130,6 @@ pub struct Build { // Runtime state filled in later on cc: HashMap)>, cxx: HashMap, - compiler_rt_built: RefCell>, } /// The various "modes" of invoking Cargo. @@ -198,7 +196,6 @@ impl Build { package_vers: String::new(), cc: HashMap::new(), cxx: HashMap::new(), - compiler_rt_built: RefCell::new(HashMap::new()), gdb_version: None, lldb_version: None, lldb_python_dir: None, @@ -246,15 +243,19 @@ impl Build { // Almost all of these are simple one-liners that shell out to the // corresponding functionality in the extra modules, where more // documentation can be found. - for target in step::all(self) { + let steps = step::all(self); + + self.verbose("bootstrap build plan:"); + for step in &steps { + self.verbose(&format!("{:?}", step)); + } + + for target in steps { let doc_out = self.out.join(&target.target).join("doc"); match target.src { Llvm { _dummy } => { native::llvm(self, target.target); } - CompilerRt { _dummy } => { - native::compiler_rt(self, target.target); - } TestHelpers { _dummy } => { native::test_helpers(self, target.target); } @@ -308,10 +309,6 @@ impl Build { doc::rustbook(self, stage, target.target, "nomicon", &doc_out); } - DocStyle { stage } => { - doc::rustbook(self, stage, target.target, "style", - &doc_out); - } DocStandalone { stage } => { doc::standalone(self, stage, target.target, &doc_out); } @@ -394,8 +391,10 @@ impl Build { "mir-opt", "mir-opt"); } CheckCodegen { compiler } => { - check::compiletest(self, &compiler, target.target, - "codegen", "codegen"); + if self.config.codegen_tests { + check::compiletest(self, &compiler, target.target, + "codegen", "codegen"); + } } CheckCodegenUnits { compiler } => { check::compiletest(self, &compiler, target.target, @@ -479,12 +478,32 @@ impl Build { /// This will detect if any submodules are out of date an run the necessary /// commands to sync them all with upstream. 
fn update_submodules(&self) { + struct Submodule<'a> { + path: &'a Path, + state: State, + } + + enum State { + // The submodule may have staged/unstaged changes + MaybeDirty, + // Or could be initialized but never updated + NotInitialized, + // The submodule itself has extra commits but those changes haven't been committed to + // the (outer) git repository + OutOfSync, + } + if !self.config.submodules { return } if fs::metadata(self.src.join(".git")).is_err() { return } + let git = || { + let mut cmd = Command::new("git"); + cmd.current_dir(&self.src); + return cmd + }; let git_submodule = || { let mut cmd = Command::new("git"); cmd.current_dir(&self.src).arg("submodule"); @@ -496,19 +515,67 @@ impl Build { // of detecting whether we need to run all the submodule commands // below. let out = output(git_submodule().arg("status")); - if !out.lines().any(|l| l.starts_with("+") || l.starts_with("-")) { - return + let mut submodules = vec![]; + for line in out.lines() { + // NOTE `git submodule status` output looks like this: + // + // -5066b7dcab7e700844b0e2ba71b8af9dc627a59b src/liblibc + // +b37ef24aa82d2be3a3cc0fe89bf82292f4ca181c src/compiler-rt (remotes/origin/..) + // e058ca661692a8d01f8cf9d35939dfe3105ce968 src/jemalloc (3.6.0-533-ge058ca6) + // + // The first character can be '-', '+' or ' ' and denotes the `State` of the submodule + // Right next to this character is the SHA-1 of the submodule HEAD + // And after that comes the path to the submodule + let path = Path::new(line[1..].split(' ').skip(1).next().unwrap()); + let state = if line.starts_with('-') { + State::NotInitialized + } else if line.starts_with('+') { + State::OutOfSync + } else if line.starts_with(' ') { + State::MaybeDirty + } else { + panic!("unexpected git submodule state: {:?}", line.chars().next()); + }; + + submodules.push(Submodule { path: path, state: state }) } self.run(git_submodule().arg("sync")); - self.run(git_submodule().arg("init")); - self.run(git_submodule().arg("update")); - self.run(git_submodule().arg("update").arg("--recursive")); - self.run(git_submodule().arg("status").arg("--recursive")); - self.run(git_submodule().arg("foreach").arg("--recursive") - .arg("git").arg("clean").arg("-fdx")); - self.run(git_submodule().arg("foreach").arg("--recursive") - .arg("git").arg("checkout").arg(".")); + + for submodule in submodules { + // If using llvm-root then don't touch the llvm submodule.
+ if submodule.path.components().any(|c| c == Component::Normal("llvm".as_ref())) && + self.config.target_config.get(&self.config.build) + .and_then(|c| c.llvm_config.as_ref()).is_some() + { + continue + } + + if submodule.path.components().any(|c| c == Component::Normal("jemalloc".as_ref())) && + !self.config.use_jemalloc + { + continue + } + + match submodule.state { + State::MaybeDirty => { + // drop staged changes + self.run(git().arg("-C").arg(submodule.path).args(&["reset", "--hard"])); + // drops unstaged changes + self.run(git().arg("-C").arg(submodule.path).args(&["clean", "-fdx"])); + }, + State::NotInitialized => { + self.run(git_submodule().arg("init").arg(submodule.path)); + self.run(git_submodule().arg("update").arg(submodule.path)); + }, + State::OutOfSync => { + // drops submodule commits that weren't reported to the (outer) git repository + self.run(git_submodule().arg("update").arg(submodule.path)); + self.run(git().arg("-C").arg(submodule.path).args(&["reset", "--hard"])); + self.run(git().arg("-C").arg(submodule.path).args(&["clean", "-fdx"])); + }, + } + } } /// Clear out `dir` if `input` is newer. @@ -519,6 +586,8 @@ impl Build { if mtime(&stamp) < mtime(input) { self.verbose(&format!("Dirty - {}", dir.display())); let _ = fs::remove_dir_all(dir); + } else if stamp.exists() { + return } t!(fs::create_dir_all(dir)); t!(File::create(stamp)); @@ -543,6 +612,10 @@ impl Build { .arg("-j").arg(self.jobs().to_string()) .arg("--target").arg(target); + // FIXME: Temporary fix for https://github.com/rust-lang/cargo/issues/3005 + // Force cargo to output binaries with disambiguating hashes in the name + cargo.env("__CARGO_DEFAULT_LIB_METADATA", "1"); + let stage; if compiler.stage == 0 && self.local_rebuild { // Assume the local-rebuild rustc already has stage1 features. @@ -574,7 +647,7 @@ impl Build { .env("RUSTDOC_REAL", self.rustdoc(compiler)) .env("RUSTC_FLAGS", self.rustc_flags(target).join(" ")); - self.add_bootstrap_key(compiler, &mut cargo); + self.add_bootstrap_key(&mut cargo); // Specify some various options for build scripts used throughout // the build. @@ -769,11 +842,6 @@ impl Build { } } - /// Root output directory for compiler-rt compiled for `target` - fn compiler_rt_out(&self, target: &str) -> PathBuf { - self.out.join(target).join("compiler-rt") - } - /// Root output directory for rust_test_helpers library compiled for /// `target` fn test_helpers_out(&self, target: &str) -> PathBuf { @@ -794,16 +862,11 @@ impl Build { } /// Adds the compiler's bootstrap key to the environment of `cmd`. - fn add_bootstrap_key(&self, compiler: &Compiler, cmd: &mut Command) { - // In stage0 we're using a previously released stable compiler, so we - // use the stage0 bootstrap key. Otherwise we use our own build's - // bootstrap key. - let bootstrap_key = if compiler.is_snapshot(self) && !self.local_rebuild { - &self.bootstrap_key_stage0 - } else { - &self.bootstrap_key - }; - cmd.env("RUSTC_BOOTSTRAP_KEY", bootstrap_key); + fn add_bootstrap_key(&self, cmd: &mut Command) { + cmd.env("RUSTC_BOOTSTRAP", "1"); + // FIXME: Transitionary measure to bootstrap using the old bootstrap logic. + // Remove this once the bootstrap compiler uses the new login in Issue #36548. + cmd.env("RUSTC_BOOTSTRAP_KEY", "5c6cf767"); } /// Returns the compiler's libdir where it stores the dynamic libraries that @@ -889,7 +952,11 @@ impl Build { /// Returns the path to the C++ compiler for the target specified, may panic /// if no C++ compiler was configured for the target. 
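Just above, `add_bootstrap_key` stops choosing between the stage0 key and the build's own key and instead exports `RUSTC_BOOTSTRAP=1`, keeping a hard-coded `RUSTC_BOOTSTRAP_KEY` only as the transitional measure its FIXME describes. A minimal sketch of what this arranges for a cargo invocation (an illustration only, assuming nothing beyond the two environment variables shown in the patch):

```rust
use std::process::Command;

fn main() {
    // Sketch of what `add_bootstrap_key` now sets up for every cargo
    // invocation: the variable is inherited by rustc, letting the stable
    // stage0 compiler accept the unstable features the in-tree crates use.
    let mut cargo = Command::new("cargo");
    cargo.arg("build")
         .env("RUSTC_BOOTSTRAP", "1")
         // Transitional, per the FIXME: older stage0 snapshots still expect a key.
         .env("RUSTC_BOOTSTRAP_KEY", "5c6cf767");
    println!("would run: {:?}", cargo);
}
```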
fn cxx(&self, target: &str) -> &Path { - self.cxx[target].path() + match self.cxx.get(target) { + Some(p) => p.path(), + None => panic!("\n\ntarget `{}` is not configured as a host, + only as a target\n\n", target), + } } /// Returns flags to pass to the compiler to generate code for `target`. @@ -907,6 +974,13 @@ impl Build { } return base } + + /// Returns the "musl root" for this `target`, if defined + fn musl_root(&self, target: &str) -> Option<&Path> { + self.config.target_config[target].musl_root.as_ref() + .or(self.config.musl_root.as_ref()) + .map(|p| &**p) + } } impl<'a> Compiler<'a> { diff --git a/src/bootstrap/mk/Makefile.in b/src/bootstrap/mk/Makefile.in index c657785d78..cc44d45c2c 100644 --- a/src/bootstrap/mk/Makefile.in +++ b/src/bootstrap/mk/Makefile.in @@ -22,6 +22,10 @@ BOOTSTRAP := $(CFG_PYTHON) $(CFG_SRC_DIR)src/bootstrap/bootstrap.py $(BOOTSTRAP_ all: $(Q)$(BOOTSTRAP) +# Don’t use $(Q) here, always show how to invoke the bootstrap script directly +help: + $(BOOTSTRAP) --help + clean: $(Q)$(BOOTSTRAP) --clean diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index a78cef4f40..df6408e5fe 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -27,7 +27,7 @@ use cmake; use gcc; use Build; -use util::{staticlib, up_to_date}; +use util::up_to_date; /// Compile LLVM for `target`. pub fn llvm(build: &Build, target: &str) { @@ -65,7 +65,7 @@ pub fn llvm(build: &Build, target: &str) { .out_dir(&dst) .profile(if build.config.llvm_optimize {"Release"} else {"Debug"}) .define("LLVM_ENABLE_ASSERTIONS", assertions) - .define("LLVM_TARGETS_TO_BUILD", "X86;ARM;AArch64;Mips;PowerPC") + .define("LLVM_TARGETS_TO_BUILD", "X86;ARM;AArch64;Mips;PowerPC;SystemZ") .define("LLVM_INCLUDE_EXAMPLES", "OFF") .define("LLVM_INCLUDE_TESTS", "OFF") .define("LLVM_INCLUDE_DOCS", "OFF") @@ -131,401 +131,6 @@ fn check_llvm_version(build: &Build, llvm_config: &Path) { panic!("\n\nbad LLVM version: {}, need >=3.5\n\n", version) } -/// Compiles the `compiler-rt` library, or at least the builtins part of it. -/// -/// Note that while compiler-rt has a build system associated with it, we -/// specifically don't use it here. The compiler-rt build system, written in -/// CMake, is actually *very* difficult to work with in terms of getting it to -/// compile on all the relevant platforms we want it to compile on. In the end -/// it became so much pain to work with local patches, work around the oddities -/// of the build system, etc, that we're just building everything by hand now. -/// -/// In general compiler-rt is just a bunch of intrinsics that are in practice -/// *very* stable. We just need to make sure that all the relevant functions and -/// such are compiled somewhere and placed in an object file somewhere. -/// Eventually, these should all be written in Rust! -/// -/// So below you'll find a listing of every single file in the compiler-rt repo -/// that we're compiling. We just reach in and compile with the `gcc` crate -/// which should have all the relevant flags and such already configured. -/// -/// The risk here is that if we update compiler-rt we may need to compile some -/// new intrinsics, but to be honest we surely don't use all of the intrinsics -/// listed below today so the likelihood of us actually needing a new intrinsic -/// is quite low. The failure case is also just that someone reports a link -/// error (if any) and then we just add it to the list. Overall, that cost is -/// far far less than working with compiler-rt's build system over time. 
-pub fn compiler_rt(build: &Build, target: &str) { - let build_dir = build.compiler_rt_out(target); - let output = build_dir.join(staticlib("compiler-rt", target)); - build.compiler_rt_built.borrow_mut().insert(target.to_string(), - output.clone()); - t!(fs::create_dir_all(&build_dir)); - - let mut cfg = gcc::Config::new(); - cfg.cargo_metadata(false) - .out_dir(&build_dir) - .target(target) - .host(&build.config.build) - .opt_level(2) - .debug(false); - - if target.contains("msvc") { - // Don't pull in extra libraries on MSVC - cfg.flag("/Zl"); - - // Emulate C99 and C++11's __func__ for MSVC prior to 2013 CTP - cfg.define("__func__", Some("__FUNCTION__")); - } else { - // Turn off various features of gcc and such, mostly copying - // compiler-rt's build system already - cfg.flag("-fno-builtin"); - cfg.flag("-fvisibility=hidden"); - cfg.flag("-fomit-frame-pointer"); - cfg.flag("-ffreestanding"); - } - - let mut sources = vec![ - "absvdi2.c", - "absvsi2.c", - "adddf3.c", - "addsf3.c", - "addvdi3.c", - "addvsi3.c", - "apple_versioning.c", - "ashldi3.c", - "ashrdi3.c", - "clear_cache.c", - "clzdi2.c", - "clzsi2.c", - "cmpdi2.c", - "comparedf2.c", - "comparesf2.c", - "ctzdi2.c", - "ctzsi2.c", - "divdc3.c", - "divdf3.c", - "divdi3.c", - "divmoddi4.c", - "divmodsi4.c", - "divsc3.c", - "divsf3.c", - "divsi3.c", - "divxc3.c", - "extendsfdf2.c", - "extendhfsf2.c", - "ffsdi2.c", - "fixdfdi.c", - "fixdfsi.c", - "fixsfdi.c", - "fixsfsi.c", - "fixunsdfdi.c", - "fixunsdfsi.c", - "fixunssfdi.c", - "fixunssfsi.c", - "fixunsxfdi.c", - "fixunsxfsi.c", - "fixxfdi.c", - "floatdidf.c", - "floatdisf.c", - "floatdixf.c", - "floatsidf.c", - "floatsisf.c", - "floatundidf.c", - "floatundisf.c", - "floatundixf.c", - "floatunsidf.c", - "floatunsisf.c", - "int_util.c", - "lshrdi3.c", - "moddi3.c", - "modsi3.c", - "muldc3.c", - "muldf3.c", - "muldi3.c", - "mulodi4.c", - "mulosi4.c", - "muloti4.c", - "mulsc3.c", - "mulsf3.c", - "mulvdi3.c", - "mulvsi3.c", - "mulxc3.c", - "negdf2.c", - "negdi2.c", - "negsf2.c", - "negvdi2.c", - "negvsi2.c", - "paritydi2.c", - "paritysi2.c", - "popcountdi2.c", - "popcountsi2.c", - "powidf2.c", - "powisf2.c", - "powixf2.c", - "subdf3.c", - "subsf3.c", - "subvdi3.c", - "subvsi3.c", - "truncdfhf2.c", - "truncdfsf2.c", - "truncsfhf2.c", - "ucmpdi2.c", - "udivdi3.c", - "udivmoddi4.c", - "udivmodsi4.c", - "udivsi3.c", - "umoddi3.c", - "umodsi3.c", - ]; - - if !target.contains("ios") { - sources.extend(vec![ - "absvti2.c", - "addtf3.c", - "addvti3.c", - "ashlti3.c", - "ashrti3.c", - "clzti2.c", - "cmpti2.c", - "ctzti2.c", - "divtf3.c", - "divti3.c", - "ffsti2.c", - "fixdfti.c", - "fixsfti.c", - "fixunsdfti.c", - "fixunssfti.c", - "fixunsxfti.c", - "fixxfti.c", - "floattidf.c", - "floattisf.c", - "floattixf.c", - "floatuntidf.c", - "floatuntisf.c", - "floatuntixf.c", - "lshrti3.c", - "modti3.c", - "multf3.c", - "multi3.c", - "mulvti3.c", - "negti2.c", - "negvti2.c", - "parityti2.c", - "popcountti2.c", - "powitf2.c", - "subtf3.c", - "subvti3.c", - "trampoline_setup.c", - "ucmpti2.c", - "udivmodti4.c", - "udivti3.c", - "umodti3.c", - ]); - } - - if target.contains("apple") { - sources.extend(vec![ - "atomic_flag_clear.c", - "atomic_flag_clear_explicit.c", - "atomic_flag_test_and_set.c", - "atomic_flag_test_and_set_explicit.c", - "atomic_signal_fence.c", - "atomic_thread_fence.c", - ]); - } - - if !target.contains("windows") { - sources.push("emutls.c"); - } - - if target.contains("msvc") { - if target.contains("x86_64") { - sources.extend(vec![ - "x86_64/floatdidf.c", - "x86_64/floatdisf.c", - 
"x86_64/floatdixf.c", - ]); - } - } else { - if !target.contains("freebsd") { - sources.push("gcc_personality_v0.c"); - } - - if target.contains("x86_64") { - sources.extend(vec![ - "x86_64/chkstk.S", - "x86_64/chkstk2.S", - "x86_64/floatdidf.c", - "x86_64/floatdisf.c", - "x86_64/floatdixf.c", - "x86_64/floatundidf.S", - "x86_64/floatundisf.S", - "x86_64/floatundixf.S", - ]); - } - - if target.contains("i386") || - target.contains("i586") || - target.contains("i686") { - sources.extend(vec![ - "i386/ashldi3.S", - "i386/ashrdi3.S", - "i386/chkstk.S", - "i386/chkstk2.S", - "i386/divdi3.S", - "i386/floatdidf.S", - "i386/floatdisf.S", - "i386/floatdixf.S", - "i386/floatundidf.S", - "i386/floatundisf.S", - "i386/floatundixf.S", - "i386/lshrdi3.S", - "i386/moddi3.S", - "i386/muldi3.S", - "i386/udivdi3.S", - "i386/umoddi3.S", - ]); - } - } - - if target.contains("arm") && !target.contains("ios") { - sources.extend(vec![ - "arm/aeabi_cdcmp.S", - "arm/aeabi_cdcmpeq_check_nan.c", - "arm/aeabi_cfcmp.S", - "arm/aeabi_cfcmpeq_check_nan.c", - "arm/aeabi_dcmp.S", - "arm/aeabi_div0.c", - "arm/aeabi_drsub.c", - "arm/aeabi_fcmp.S", - "arm/aeabi_frsub.c", - "arm/aeabi_idivmod.S", - "arm/aeabi_ldivmod.S", - "arm/aeabi_memcmp.S", - "arm/aeabi_memcpy.S", - "arm/aeabi_memmove.S", - "arm/aeabi_memset.S", - "arm/aeabi_uidivmod.S", - "arm/aeabi_uldivmod.S", - "arm/bswapdi2.S", - "arm/bswapsi2.S", - "arm/clzdi2.S", - "arm/clzsi2.S", - "arm/comparesf2.S", - "arm/divmodsi4.S", - "arm/divsi3.S", - "arm/modsi3.S", - "arm/switch16.S", - "arm/switch32.S", - "arm/switch8.S", - "arm/switchu8.S", - "arm/sync_synchronize.S", - "arm/udivmodsi4.S", - "arm/udivsi3.S", - "arm/umodsi3.S", - ]); - } - - if target.contains("armv7") { - sources.extend(vec![ - "arm/sync_fetch_and_add_4.S", - "arm/sync_fetch_and_add_8.S", - "arm/sync_fetch_and_and_4.S", - "arm/sync_fetch_and_and_8.S", - "arm/sync_fetch_and_max_4.S", - "arm/sync_fetch_and_max_8.S", - "arm/sync_fetch_and_min_4.S", - "arm/sync_fetch_and_min_8.S", - "arm/sync_fetch_and_nand_4.S", - "arm/sync_fetch_and_nand_8.S", - "arm/sync_fetch_and_or_4.S", - "arm/sync_fetch_and_or_8.S", - "arm/sync_fetch_and_sub_4.S", - "arm/sync_fetch_and_sub_8.S", - "arm/sync_fetch_and_umax_4.S", - "arm/sync_fetch_and_umax_8.S", - "arm/sync_fetch_and_umin_4.S", - "arm/sync_fetch_and_umin_8.S", - "arm/sync_fetch_and_xor_4.S", - "arm/sync_fetch_and_xor_8.S", - ]); - } - - if target.contains("eabihf") { - sources.extend(vec![ - "arm/adddf3vfp.S", - "arm/addsf3vfp.S", - "arm/divdf3vfp.S", - "arm/divsf3vfp.S", - "arm/eqdf2vfp.S", - "arm/eqsf2vfp.S", - "arm/extendsfdf2vfp.S", - "arm/fixdfsivfp.S", - "arm/fixsfsivfp.S", - "arm/fixunsdfsivfp.S", - "arm/fixunssfsivfp.S", - "arm/floatsidfvfp.S", - "arm/floatsisfvfp.S", - "arm/floatunssidfvfp.S", - "arm/floatunssisfvfp.S", - "arm/gedf2vfp.S", - "arm/gesf2vfp.S", - "arm/gtdf2vfp.S", - "arm/gtsf2vfp.S", - "arm/ledf2vfp.S", - "arm/lesf2vfp.S", - "arm/ltdf2vfp.S", - "arm/ltsf2vfp.S", - "arm/muldf3vfp.S", - "arm/mulsf3vfp.S", - "arm/negdf2vfp.S", - "arm/negsf2vfp.S", - "arm/nedf2vfp.S", - "arm/nesf2vfp.S", - "arm/restore_vfp_d8_d15_regs.S", - "arm/save_vfp_d8_d15_regs.S", - "arm/subdf3vfp.S", - "arm/subsf3vfp.S", - "arm/truncdfsf2vfp.S", - "arm/unorddf2vfp.S", - "arm/unordsf2vfp.S", - ]); - } - - if target.contains("aarch64") { - sources.extend(vec![ - "comparetf2.c", - "extenddftf2.c", - "extendsftf2.c", - "fixtfdi.c", - "fixtfsi.c", - "fixtfti.c", - "fixunstfdi.c", - "fixunstfsi.c", - "fixunstfti.c", - "floatditf.c", - "floatsitf.c", - "floatunditf.c", - 
"floatunsitf.c", - "multc3.c", - "trunctfdf2.c", - "trunctfsf2.c", - ]); - } - - let mut out_of_date = false; - for src in sources { - let src = build.src.join("src/compiler-rt/lib/builtins").join(src); - out_of_date = out_of_date || !up_to_date(&src, &output); - cfg.file(src); - } - if !out_of_date { - return - } - cfg.compile("libcompiler-rt.a"); -} - /// Compiles the `rust_test_helpers.c` library which we used in various /// `run-pass` test suites for ABI testing. pub fn test_helpers(build: &Build, target: &str) { diff --git a/src/bootstrap/sanity.rs b/src/bootstrap/sanity.rs index d6ac3ef6c9..05c35543e3 100644 --- a/src/bootstrap/sanity.rs +++ b/src/bootstrap/sanity.rs @@ -75,6 +75,12 @@ pub fn check(build: &mut Build) { need_cmd("python".as_ref()); + // If a manual nodejs was added to the config, + // of if a nodejs install is detected through config, use it. + if let Some(ref s) = build.config.nodejs { + need_cmd(s.as_ref()); + } + // We're gonna build some custom C code here and there, host triples // also build some C++ shims for LLVM so we need a C++ compiler. for target in build.config.target.iter() { @@ -89,7 +95,7 @@ pub fn check(build: &mut Build) { // Externally configured LLVM requires FileCheck to exist let filecheck = build.llvm_filecheck(&build.config.build); - if !filecheck.starts_with(&build.out) && !filecheck.exists() { + if !filecheck.starts_with(&build.out) && !filecheck.exists() && build.config.codegen_tests { panic!("filecheck executable {:?} does not exist", filecheck); } @@ -111,8 +117,8 @@ pub fn check(build: &mut Build) { // Make sure musl-root is valid if specified if target.contains("musl") && !target.contains("mips") { - match build.config.musl_root { - Some(ref root) => { + match build.musl_root(target) { + Some(root) => { if fs::metadata(root.join("lib/libc.a")).is_err() { panic!("couldn't find libc.a in musl dir: {}", root.join("lib").display()); @@ -123,8 +129,9 @@ pub fn check(build: &mut Build) { } } None => { - panic!("when targeting MUSL the build.musl-root option \ - must be specified in config.toml") + panic!("when targeting MUSL either the build.musl-root \ + option or the target.$TARGET.musl-root one must \ + be specified in config.toml") } } } diff --git a/src/bootstrap/step.rs b/src/bootstrap/step.rs index 8d3cb36166..4b5a26d205 100644 --- a/src/bootstrap/step.rs +++ b/src/bootstrap/step.rs @@ -82,7 +82,6 @@ macro_rules! targets { // There aren't really any parameters to this, but empty structs // with braces are unstable so we just pick something that works. (llvm, Llvm { _dummy: () }), - (compiler_rt, CompilerRt { _dummy: () }), (test_helpers, TestHelpers { _dummy: () }), (debugger_scripts, DebuggerScripts { stage: u32 }), @@ -92,7 +91,6 @@ macro_rules! targets { (doc, Doc { stage: u32 }), (doc_book, DocBook { stage: u32 }), (doc_nomicon, DocNomicon { stage: u32 }), - (doc_style, DocStyle { stage: u32 }), (doc_standalone, DocStandalone { stage: u32 }), (doc_std, DocStd { stage: u32 }), (doc_test, DocTest { stage: u32 }), @@ -173,6 +171,8 @@ targets!(define_source); /// into a topologically sorted list which when executed left-to-right will /// correctly sequence the entire build. 
pub fn all(build: &Build) -> Vec { + build.verbose("inferred build steps:"); + let mut ret = Vec::new(); let mut all = HashSet::new(); for target in top_level(build) { @@ -186,6 +186,7 @@ pub fn all(build: &Build) -> Vec { set: &mut HashSet>) { if set.insert(target.clone()) { for dep in target.deps(build) { + build.verbose(&format!("{:?}\n -> {:?}", target, dep)); fill(build, &dep, ret, set); } ret.push(target.clone()); @@ -335,8 +336,7 @@ impl<'a> Step<'a> { vec![self.libstd(compiler)] } Source::Libstd { compiler } => { - vec![self.compiler_rt(()), - self.rustc(compiler.stage).target(compiler.host)] + vec![self.rustc(compiler.stage).target(compiler.host)] } Source::LibrustcLink { compiler, host } => { vec![self.librustc(compiler), @@ -349,7 +349,6 @@ impl<'a> Step<'a> { vec![self.libstd(compiler), self.target(host).rustc(compiler.stage)] } - Source::CompilerRt { _dummy } => Vec::new(), Source::Llvm { _dummy } => Vec::new(), Source::TestHelpers { _dummy } => Vec::new(), Source::DebuggerScripts { stage: _ } => Vec::new(), @@ -366,8 +365,7 @@ impl<'a> Step<'a> { vec![self.libtest(compiler)] } Source::DocBook { stage } | - Source::DocNomicon { stage } | - Source::DocStyle { stage } => { + Source::DocNomicon { stage } => { vec![self.target(&build.config.build).tool_rustbook(stage)] } Source::DocErrorIndex { stage } => { @@ -382,8 +380,7 @@ impl<'a> Step<'a> { Source::Doc { stage } => { let mut deps = vec![ self.doc_book(stage), self.doc_nomicon(stage), - self.doc_style(stage), self.doc_standalone(stage), - self.doc_std(stage), + self.doc_standalone(stage), self.doc_std(stage), self.doc_error_index(stage), ]; diff --git a/src/bootstrap/util.rs b/src/bootstrap/util.rs index dfc1c7a243..6c0a32a54d 100644 --- a/src/bootstrap/util.rs +++ b/src/bootstrap/util.rs @@ -23,7 +23,7 @@ use filetime::FileTime; /// Returns the `name` as the filename of a static library for `target`. pub fn staticlib(name: &str, target: &str) -> String { - if target.contains("windows-msvc") { + if target.contains("windows") { format!("{}.lib", name) } else { format!("lib{}.a", name) diff --git a/src/compiler-rt/lib/builtins/floatsidf.c b/src/compiler-rt/lib/builtins/floatsidf.c index 1cf99b782a..2bf97ba0e3 100644 --- a/src/compiler-rt/lib/builtins/floatsidf.c +++ b/src/compiler-rt/lib/builtins/floatsidf.c @@ -24,27 +24,26 @@ COMPILER_RT_ABI fp_t __floatsidf(int a) { const int aWidth = sizeof a * CHAR_BIT; - + // Handle zero as a special case to protect clz if (a == 0) return fromRep(0); // All other cases begin by extracting the sign and absolute value of a rep_t sign = 0; + unsigned aAbs = (unsigned)a; if (a < 0) { sign = signBit; - a = -a; + aAbs = ~(unsigned)a + 1U; } // Exponent of (fp_t)a is the width of abs(a). - const int exponent = (aWidth - 1) - __builtin_clz(a); + const int exponent = (aWidth - 1) - __builtin_clz(aAbs); rep_t result; - // Shift a into the significand field and clear the implicit bit. Extra - // cast to unsigned int is necessary to get the correct behavior for - // the input INT_MIN. + // Shift a into the significand field and clear the implicit bit. 
const int shift = significandBits - exponent; - result = (rep_t)(unsigned int)a << shift ^ implicitBit; + result = (rep_t)aAbs << shift ^ implicitBit; // Insert the exponent result += (rep_t)(exponent + exponentBias) << significandBits; diff --git a/src/compiler-rt/lib/builtins/floatsisf.c b/src/compiler-rt/lib/builtins/floatsisf.c index 467dd1d1ea..0cbe8cee55 100644 --- a/src/compiler-rt/lib/builtins/floatsisf.c +++ b/src/compiler-rt/lib/builtins/floatsisf.c @@ -24,30 +24,31 @@ COMPILER_RT_ABI fp_t __floatsisf(int a) { const int aWidth = sizeof a * CHAR_BIT; - + // Handle zero as a special case to protect clz if (a == 0) return fromRep(0); // All other cases begin by extracting the sign and absolute value of a rep_t sign = 0; + unsigned aAbs = (unsigned)a; if (a < 0) { sign = signBit; - a = -a; + aAbs = ~(unsigned)a + 1U; } // Exponent of (fp_t)a is the width of abs(a). - const int exponent = (aWidth - 1) - __builtin_clz(a); + const int exponent = (aWidth - 1) - __builtin_clz(aAbs); rep_t result; // Shift a into the significand field, rounding if it is a right-shift if (exponent <= significandBits) { const int shift = significandBits - exponent; - result = (rep_t)a << shift ^ implicitBit; + result = (rep_t)aAbs << shift ^ implicitBit; } else { const int shift = exponent - significandBits; - result = (rep_t)a >> shift ^ implicitBit; - rep_t round = (rep_t)a << (typeWidth - shift); + result = (rep_t)aAbs >> shift ^ implicitBit; + rep_t round = (rep_t)aAbs << (typeWidth - shift); if (round > signBit) result++; if (round == signBit) result += result & 1; } diff --git a/src/compiler-rt/lib/builtins/int_lib.h b/src/compiler-rt/lib/builtins/int_lib.h index 8dfe5672d1..6cf17497d3 100644 --- a/src/compiler-rt/lib/builtins/int_lib.h +++ b/src/compiler-rt/lib/builtins/int_lib.h @@ -32,7 +32,7 @@ #if __ARM_EABI__ # define ARM_EABI_FNALIAS(aeabi_name, name) \ void __aeabi_##aeabi_name() __attribute__((alias("__" #name))); -# define COMPILER_RT_ABI __attribute__((pcs("aapcs"))) +# define COMPILER_RT_ABI #else # define ARM_EABI_FNALIAS(aeabi_name, name) # define COMPILER_RT_ABI diff --git a/src/doc/book/associated-types.md b/src/doc/book/associated-types.md index cb54ac2419..0998a88c4d 100644 --- a/src/doc/book/associated-types.md +++ b/src/doc/book/associated-types.md @@ -67,7 +67,7 @@ trait Graph { Simple enough. Associated types use the `type` keyword, and go inside the body of the trait, with the functions. -These `type` declarations can have all the same thing as functions do. For example, +These type declarations work the same way as those for functions. For example, if we wanted our `N` type to implement `Display`, so we can print the nodes out, we could do this: diff --git a/src/doc/book/borrow-and-asref.md b/src/doc/book/borrow-and-asref.md index 1cfeb2620b..c30b2e6866 100644 --- a/src/doc/book/borrow-and-asref.md +++ b/src/doc/book/borrow-and-asref.md @@ -8,7 +8,7 @@ different. Here’s a quick refresher on what these two traits mean. # Borrow -The `Borrow` trait is used when you’re writing a datastructure, and you want to +The `Borrow` trait is used when you’re writing a data structure, and you want to use either an owned or borrowed type as synonymous for some purpose. For example, [`HashMap`][hashmap] has a [`get` method][get] which uses `Borrow`: @@ -86,7 +86,7 @@ We can see how they’re kind of the same: they both deal with owned and borrowe versions of some type. However, they’re a bit different. 
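Because the two traits are easy to conflate, a minimal side-by-side sketch may help (not taken from the book; `lookup` and `shout` are invented names). `HashMap::get` takes `&Q` where the key type implements `Borrow<Q>`, which is what lets a `HashMap<String, _>` be queried with a plain `&str`, while `AsRef` fits when a function merely needs a cheap `&str` view of whatever it is given:

```rust
use std::collections::HashMap;

// Borrow: a HashMap<String, i32> can be queried with &str because
// String: Borrow<str> and the two hash and compare identically.
fn lookup(map: &HashMap<String, i32>, key: &str) -> Option<i32> {
    map.get(key).cloned()
}

// AsRef: the function only needs *some* &str view of its argument,
// so it accepts String, &str, Box<str>, and so on equally well.
fn shout<S: AsRef<str>>(s: S) -> String {
    s.as_ref().to_uppercase()
}

fn main() {
    let mut map = HashMap::new();
    map.insert("one".to_string(), 1);
    assert_eq!(lookup(&map, "one"), Some(1));
    assert_eq!(shout("hi"), "HI");
    assert_eq!(shout(String::from("hi")), "HI");
}
```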
Choose `Borrow` when you want to abstract over different kinds of borrowing, or -when you’re building a datastructure that treats owned and borrowed values in +when you’re building a data structure that treats owned and borrowed values in equivalent ways, such as hashing and comparison. Choose `AsRef` when you want to convert something to a reference directly, and diff --git a/src/doc/book/closures.md b/src/doc/book/closures.md index 1470eac982..3ed85c1a90 100644 --- a/src/doc/book/closures.md +++ b/src/doc/book/closures.md @@ -262,7 +262,7 @@ the result: ```rust fn call_with_one(some_closure: F) -> i32 - where F : Fn(i32) -> i32 { + where F: Fn(i32) -> i32 { some_closure(1) } @@ -279,7 +279,7 @@ Let’s examine the signature of `call_with_one` in more depth: ```rust fn call_with_one(some_closure: F) -> i32 -# where F : Fn(i32) -> i32 { +# where F: Fn(i32) -> i32 { # some_closure(1) } ``` @@ -288,7 +288,7 @@ isn’t interesting. The next part is: ```rust # fn call_with_one(some_closure: F) -> i32 - where F : Fn(i32) -> i32 { + where F: Fn(i32) -> i32 { # some_closure(1) } ``` @@ -340,7 +340,7 @@ fn call_with_ref<'a, F>(some_closure:F) -> i32 where F: Fn(&'a i32) -> i32 { ``` -However this presents a problem with in our case. When you specify the explicit +However this presents a problem in our case. When you specify the explicit lifetime on a function it binds that lifetime to the *entire* scope of the function instead of just the invocation scope of our closure. This means that the borrow checker will see a mutable reference in the same lifetime as our immutable reference and fail diff --git a/src/doc/book/compiler-plugins.md b/src/doc/book/compiler-plugins.md index 8426d5a626..a9a81843ab 100644 --- a/src/doc/book/compiler-plugins.md +++ b/src/doc/book/compiler-plugins.md @@ -46,10 +46,10 @@ extern crate rustc; extern crate rustc_plugin; use syntax::parse::token; -use syntax::ast::TokenTree; +use syntax::tokenstream::TokenTree; use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacEager}; use syntax::ext::build::AstBuilder; // trait for expr_usize -use syntax_pos::Span; +use syntax::ext::quote::rt::Span; use rustc_plugin::Registry; fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree]) @@ -69,7 +69,7 @@ fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree]) } let text = match args[0] { - TokenTree::Token(_, token::Ident(s, _)) => s.to_string(), + TokenTree::Token(_, token::Ident(s)) => s.to_string(), _ => { cx.span_err(sp, "argument should be a single identifier"); return DummyResult::any(sp); diff --git a/src/doc/book/error-handling.md b/src/doc/book/error-handling.md index 6e13b464e4..a62e1b7dfa 100644 --- a/src/doc/book/error-handling.md +++ b/src/doc/book/error-handling.md @@ -59,7 +59,7 @@ handling is reducing the amount of explicit case analysis the programmer has to do while keeping code composable. Keeping code composable is important, because without that requirement, we -could [`panic`](../std/macro.panic!.html) whenever we +could [`panic`](../std/macro.panic.html) whenever we come across something unexpected. (`panic` causes the current task to unwind, and in most cases, the entire program aborts.) Here's an example: @@ -944,7 +944,7 @@ macro_rules! try { } ``` -(The [real definition](../std/macro.try!.html) is a bit more +(The [real definition](../std/macro.try.html) is a bit more sophisticated. We will address that later.) Using the `try!` macro makes it very easy to simplify our last example. Since @@ -1271,7 +1271,7 @@ macro_rules! 
try { ``` This is not its real definition. Its real definition is -[in the standard library](../std/macro.try!.html): +[in the standard library](../std/macro.try.html): @@ -2178,7 +2178,7 @@ heuristics! [`From`](../std/convert/trait.From.html) and [`Error`](../std/error/trait.Error.html) - impls to make the [`try!`](../std/macro.try!.html) + impls to make the [`try!`](../std/macro.try.html) macro more ergonomic. * If you're writing a library and your code can produce errors, define your own error type and implement the diff --git a/src/doc/book/ffi.md b/src/doc/book/ffi.md index ca104ff29a..8709c3f4b7 100644 --- a/src/doc/book/ffi.md +++ b/src/doc/book/ffi.md @@ -471,7 +471,7 @@ extern { fn main() { println!("You have readline version {} installed.", - rl_readline_version as i32); + unsafe { rl_readline_version as i32 }); } ``` @@ -539,6 +539,7 @@ This is currently hidden behind the `abi_vectorcall` gate and is subject to chan * `system` * `C` * `win64` +* `sysv64` Most of the abis in this list are self-explanatory, but the `system` abi may seem a little odd. This constraint selects whatever the appropriate ABI is for diff --git a/src/doc/book/getting-started.md b/src/doc/book/getting-started.md index 700ab2be58..bff448aadd 100644 --- a/src/doc/book/getting-started.md +++ b/src/doc/book/getting-started.md @@ -230,12 +230,13 @@ $ cd hello_world ## Writing and Running a Rust Program -Next, make a new source file and call it *main.rs*. Rust files always end -in a *.rs* extension. If you’re using more than one word in your filename, use -an underscore to separate them; for example, you'd use *hello_world.rs* rather -than *helloworld.rs*. +We need to create a source file for our Rust program. Rust files always end +in a *.rs* extension. If you are using more than one word in your filename, +use an underscore to separate them; for example, you would use +*my_program.rs* rather than *myprogram.rs*. -Now open the *main.rs* file you just created, and type the following code: +Now, make a new file and call it *main.rs*. Open the file and type +the following code: ```rust fn main() { diff --git a/src/doc/book/lang-items.md b/src/doc/book/lang-items.md index 72a3c08225..de7dbab3f1 100644 --- a/src/doc/book/lang-items.md +++ b/src/doc/book/lang-items.md @@ -57,8 +57,8 @@ fn main(argc: isize, argv: *const *const u8) -> isize { 0 } -#[lang = "eh_personality"] extern fn eh_personality() {} -#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} } +#[lang = "eh_personality"] extern fn rust_eh_personality() {} +#[lang = "panic_fmt"] extern fn rust_begin_panic() -> ! { loop {} } # #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {} # #[no_mangle] pub extern fn rust_eh_register_frames () {} # #[no_mangle] pub extern fn rust_eh_unregister_frames () {} @@ -73,8 +73,8 @@ Other features provided by lang items include: `==`, `<`, dereferencing (`*`) and `+` (etc.) operators are all marked with lang items; those specific four are `eq`, `ord`, `deref`, and `add` respectively. -- stack unwinding and general failure; the `eh_personality`, `fail` - and `fail_bounds_checks` lang items. +- stack unwinding and general failure; the `eh_personality`, + `eh_unwind_resume`, `fail` and `fail_bounds_checks` lang items. - the traits in `std::marker` used to indicate types of various kinds; lang items `send`, `sync` and `copy`. 
- the marker types and variance indicators found in diff --git a/src/doc/book/macros.md b/src/doc/book/macros.md index 9f40829f42..78fe07ec1b 100644 --- a/src/doc/book/macros.md +++ b/src/doc/book/macros.md @@ -662,7 +662,7 @@ Here are some common macros you’ll see in Rust code. This macro causes the current thread to panic. You can give it a message to panic with: -```rust,no_run +```rust,should_panic panic!("oh no!"); ``` @@ -688,7 +688,7 @@ These two macros are used in tests. `assert!` takes a boolean. `assert_eq!` takes two values and checks them for equality. `true` passes, `false` `panic!`s. Like this: -```rust,no_run +```rust,should_panic // A-ok! assert!(true); diff --git a/src/doc/book/nightly-rust.md b/src/doc/book/nightly-rust.md index b3be71038a..25570cb550 100644 --- a/src/doc/book/nightly-rust.md +++ b/src/doc/book/nightly-rust.md @@ -54,7 +54,7 @@ binary downloads][install-page]. Oh, we should also mention the officially supported platforms: -* Windows (7, 8, Server 2008 R2) +* Windows (7+) * Linux (2.6.18 or later, various distributions), x86 and x86-64 * OSX 10.7 (Lion) or greater, x86 and x86-64 diff --git a/src/doc/book/no-stdlib.md b/src/doc/book/no-stdlib.md index 6fd7cf6692..2604ca8d4c 100644 --- a/src/doc/book/no-stdlib.md +++ b/src/doc/book/no-stdlib.md @@ -55,7 +55,13 @@ fn start(_argc: isize, _argv: *const *const u8) -> isize { // provided by libstd. #[lang = "eh_personality"] #[no_mangle] -pub extern fn eh_personality() { +pub extern fn rust_eh_personality() { +} + +// This function may be needed based on the compilation target. +#[lang = "eh_unwind_resume"] +#[no_mangle] +pub extern fn rust_eh_unwind_resume() { } #[lang = "panic_fmt"] @@ -87,12 +93,18 @@ pub extern fn main(_argc: i32, _argv: *const *const u8) -> i32 { 0 } -// These functions and traits are used by the compiler, but not +// These functions are used by the compiler, but not // for a bare-bones hello world. These are normally // provided by libstd. #[lang = "eh_personality"] #[no_mangle] -pub extern fn eh_personality() { +pub extern fn rust_eh_personality() { +} + +// This function may be needed based on the compilation target. +#[lang = "eh_unwind_resume"] +#[no_mangle] +pub extern fn rust_eh_unwind_resume() { } #[lang = "panic_fmt"] @@ -104,7 +116,7 @@ pub extern fn rust_begin_panic(_msg: core::fmt::Arguments, } ``` -## More about the langauge items +## More about the language items The compiler currently makes a few assumptions about symbols which are available in the executable to call. Normally these functions are provided by @@ -112,15 +124,20 @@ the standard library, but without it you must define your own. These symbols are called "language items", and they each have an internal name, and then a signature that an implementation must conform to. -The first of these two functions, `eh_personality`, is used by the failure +The first of these functions, `rust_eh_personality`, is used by the failure mechanisms of the compiler. This is often mapped to GCC's personality function (see the [libstd implementation][unwind] for more information), but crates which do not trigger a panic can be assured that this function is never -called. Both the language item and the symbol name are `eh_personality`. - +called. The language item's name is `eh_personality`. 
+ [unwind]: https://github.com/rust-lang/rust/blob/master/src/libpanic_unwind/gcc.rs -The second function, `panic_fmt`, is also used by the failure mechanisms of the +The second function, `rust_begin_panic`, is also used by the failure mechanisms of the compiler. When a panic happens, this controls the message that's displayed on the screen. While the language item's name is `panic_fmt`, the symbol name is `rust_begin_panic`. + +A third function, `rust_eh_unwind_resume`, is also needed if the `custom_unwind_resume` +flag is set in the options of the compilation target. It allows customizing the +process of resuming unwind at the end of the landing pads. The language item's name +is `eh_unwind_resume`. diff --git a/src/doc/book/ownership.md b/src/doc/book/ownership.md index 23ca21b3b4..a711397b21 100644 --- a/src/doc/book/ownership.md +++ b/src/doc/book/ownership.md @@ -57,13 +57,13 @@ of scope at the end of `foo()`, Rust will clean up everything related to the vector, even the heap-allocated memory. This happens deterministically, at the end of the scope. -We'll cover [vectors] in detail later in this chapter; we only use them +We covered [vectors] in the previous chapter; we use them here as an example of a type that allocates space on the heap at runtime. They behave like [arrays], except their size may change by `push()`ing more elements onto them. Vectors have a [generic type][generics] `Vec`, so in this example `v` will have type -`Vec`. We'll cover generics in detail later in this chapter. +`Vec`. We'll cover [generics] in detail in a later chapter. [arrays]: primitive-types.html#arrays [vectors]: vectors.html diff --git a/src/doc/book/references-and-borrowing.md b/src/doc/book/references-and-borrowing.md index 57bfbce8b8..2ec3a00c0d 100644 --- a/src/doc/book/references-and-borrowing.md +++ b/src/doc/book/references-and-borrowing.md @@ -152,7 +152,7 @@ the thing `y` points at. You’ll notice that `x` had to be marked `mut` as well If it wasn’t, we couldn’t take a mutable borrow to an immutable value. You'll also notice we added an asterisk (`*`) in front of `y`, making it `*y`, -this is because `y` is a `&mut` reference. You'll need to use astrisks to +this is because `y` is a `&mut` reference. You'll need to use asterisks to access the contents of a reference as well. Otherwise, `&mut` references are like references. There _is_ a large diff --git a/src/doc/book/traits.md b/src/doc/book/traits.md index e685cb129b..b0d954adf6 100644 --- a/src/doc/book/traits.md +++ b/src/doc/book/traits.md @@ -47,6 +47,34 @@ As you can see, the `trait` block looks very similar to the `impl` block, but we don’t define a body, only a type signature. When we `impl` a trait, we use `impl Trait for Item`, rather than only `impl Item`. +`Self` may be used in a type annotation to refer to an instance of the type +implementing this trait passed as a parameter. `Self`, `&Self` or `&mut Self` +may be used depending on the level of ownership required. 
+ +```rust +struct Circle { + x: f64, + y: f64, + radius: f64, +} + +trait HasArea { + fn area(&self) -> f64; + + fn is_larger(&self, &Self) -> bool; +} + +impl HasArea for Circle { + fn area(&self) -> f64 { + std::f64::consts::PI * (self.radius * self.radius) + } + + fn is_larger(&self, other: &Self) -> bool { + self.area() > other.area() + } +} +``` + ## Trait bounds on generic functions Traits are useful because they allow a type to make certain promises about its @@ -247,7 +275,7 @@ won’t have its methods: [write]: ../std/io/trait.Write.html ```rust,ignore -let mut f = std::fs::File::open("foo.txt").expect("Couldn’t open foo.txt"); +let mut f = std::fs::File::create("foo.txt").expect("Couldn’t create foo.txt"); let buf = b"whatever"; // byte string literal. buf: &[u8; 8] let result = f.write(buf); # result.unwrap(); // ignore the error @@ -263,10 +291,10 @@ let result = f.write(buf); We need to `use` the `Write` trait first: -```rust,ignore +```rust,no_run use std::io::Write; -let mut f = std::fs::File::open("foo.txt").expect("Couldn’t open foo.txt"); +let mut f = std::fs::File::create("foo.txt").expect("Couldn’t create foo.txt"); let buf = b"whatever"; let result = f.write(buf); # result.unwrap(); // ignore the error diff --git a/src/doc/grammar.md b/src/doc/grammar.md index fac488d9c4..be64379b51 100644 --- a/src/doc/grammar.md +++ b/src/doc/grammar.md @@ -172,6 +172,11 @@ token : simple_token | ident | literal | symbol | whitespace token ; Each of these keywords has special meaning in its grammar, and all of them are excluded from the `ident` rule. +Not all of these keywords are used by the language. Some of them were used +before Rust 1.0, and were left reserved once their implementations were +removed. Some of them were reserved before 1.0 to make space for possible +future features. + ### Literals ```antlr diff --git a/src/doc/nomicon/ownership.md b/src/doc/nomicon/ownership.md index 6be8d3b702..a6ecf6ab91 100644 --- a/src/doc/nomicon/ownership.md +++ b/src/doc/nomicon/ownership.md @@ -52,7 +52,7 @@ let mut data = vec![1, 2, 3]; let x = &data[0]; // OH NO! `push` causes the backing storage of `data` to be reallocated. -// Dangling pointer! User after free! Alas! +// Dangling pointer! Use after free! Alas! // (this does not compile in Rust) data.push(4); diff --git a/src/doc/nomicon/safe-unsafe-meaning.md b/src/doc/nomicon/safe-unsafe-meaning.md index c4f939a608..adede0ec91 100644 --- a/src/doc/nomicon/safe-unsafe-meaning.md +++ b/src/doc/nomicon/safe-unsafe-meaning.md @@ -26,10 +26,6 @@ can therefore be trusted. You can use `unsafe` on a trait implementation to declare that the implementation of that trait has adhered to whatever contracts the trait's documentation requires. -There is also the `#[unsafe_no_drop_flag]` attribute, which exists for -historic reasons and is being phased out. See the section on [drop flags] -for details. - The standard library has a number of unsafe functions, including: * `slice::get_unchecked`, which performs unchecked indexing, allowing diff --git a/src/doc/reference.md b/src/doc/reference.md index f0ab1488d4..9f4830cd19 100644 --- a/src/doc/reference.md +++ b/src/doc/reference.md @@ -1677,6 +1677,7 @@ There are also some platform-specific ABI strings: * `extern "cdecl"` -- The default for x86\_32 C code. * `extern "stdcall"` -- The default for the Win32 API on x86\_32. * `extern "win64"` -- The default for C code on x86\_64 Windows. +* `extern "sysv64"` -- The default for C code on non-Windows x86\_64. * `extern "aapcs"` -- The default for ARM. 
* `extern "fastcall"` -- The `fastcall` ABI -- corresponds to MSVC's `__fastcall` and GCC and clang's `__attribute__((fastcall))` @@ -2058,10 +2059,6 @@ macro scope. outside of its dynamic extent), and thus this attribute has the word "unsafe" in its name. To use this, the `unsafe_destructor_blind_to_params` feature gate must be enabled. -- `unsafe_no_drop_flag` - on structs, remove the flag that prevents - destructors from being run twice. Destructors might be run multiple times on - the same object with this attribute. To use this, the `unsafe_no_drop_flag` feature - gate must be enabled. - `doc` - Doc comments such as `/// foo` are equivalent to `#[doc = "foo"]`. - `rustc_on_unimplemented` - Write a custom note to be shown along with the error when the trait is found to be unimplemented on a type. @@ -2070,6 +2067,9 @@ macro scope. trait of the same name. `{Self}` will be replaced with the type that is supposed to implement the trait but doesn't. To use this, the `on_unimplemented` feature gate must be enabled. +- `must_use` - on structs and enums, will warn if a value of this type isn't used or + assigned to a variable. You may also include an optional message by using + `#[must_use = "message"]` which will be given alongside the warning. ### Conditional compilation @@ -2283,7 +2283,7 @@ the `PartialEq` or `Clone` constraints for the appropriate `impl`: #[derive(PartialEq, Clone)] struct Foo { a: i32, - b: T + b: T, } ``` @@ -2441,6 +2441,9 @@ The currently implemented features of the reference compiler are: into a Rust program. This capability, especially the signature for the annotated function, is subject to change. +* `static_in_const` - Enables lifetime elision with a `'static` default for + `const` and `static` item declarations. + * `thread_local` - The usage of the `#[thread_local]` attribute is experimental and should be seen as unstable. This attribute is used to declare a `static` as being unique per-thread leveraging @@ -2454,12 +2457,6 @@ The currently implemented features of the reference compiler are: * `unboxed_closures` - Rust's new closure design, which is currently a work in progress feature with many known bugs. -* `unsafe_no_drop_flag` - Allows use of the `#[unsafe_no_drop_flag]` attribute, - which removes hidden flag added to a type that - implements the `Drop` trait. The design for the - `Drop` flag is subject to change, and this feature - may be removed in the future. - * `unmarked_api` - Allows use of items within a `#![staged_api]` crate which have not been marked with a stability marker. Such items should not be allowed by the compiler to exist, @@ -2475,8 +2472,7 @@ The currently implemented features of the reference compiler are: * - `default_type_parameter_fallback` - Allows type parameter defaults to influence type inference. -* - `stmt_expr_attributes` - Allows attributes on expressions and - non-item statements. +* - `stmt_expr_attributes` - Allows attributes on expressions. * - `type_ascription` - Allows type ascription expressions `expr: Type`. @@ -2485,6 +2481,9 @@ The currently implemented features of the reference compiler are: * - `dotdot_in_tuple_patterns` - Allows `..` in tuple (struct) patterns. +* - `abi_sysv64` - Allows the usage of the system V AMD64 calling convention + (e.g. `extern "sysv64" func fn_();`) + If a feature is promoted to a language feature, then all existing programs will start to receive compilation warnings about `#![feature]` directives which enabled the new feature (because the directive is no longer necessary). 
However, if a @@ -3896,7 +3895,7 @@ Coercion is allowed between the following types: use std::ops::Deref; struct CharContainer { - value: char + value: char, } impl Deref for CharContainer { diff --git a/src/doc/rust.css b/src/doc/rust.css index 9c1b3724d8..262db5673e 100644 --- a/src/doc/rust.css +++ b/src/doc/rust.css @@ -159,7 +159,7 @@ em { footer { border-top: 1px solid #ddd; - font-size: 14.3px; + font-size: 14px; font-style: italic; padding-top: 5px; margin-top: 3em; diff --git a/src/doc/style/README.md b/src/doc/style/README.md deleted file mode 100644 index 8d837d1a1a..0000000000 --- a/src/doc/style/README.md +++ /dev/null @@ -1,64 +0,0 @@ -% Style Guidelines - -This document collects the emerging principles, conventions, abstractions, and -best practices for writing Rust code. - -Since Rust is evolving at a rapid pace, these guidelines are -preliminary. The hope is that writing them down explicitly will help -drive discussion, consensus and adoption. - -Whenever feasible, guidelines provide specific examples from Rust's standard -libraries. - -### Guideline statuses - -Every guideline has a status: - -* **[FIXME]**: Marks places where there is more work to be done. In - some cases, that just means going through the RFC process. - -* **[FIXME #NNNNN]**: Like **[FIXME]**, but links to the issue tracker. - -* **[RFC #NNNN]**: Marks accepted guidelines, linking to the rust-lang - RFC establishing them. - -### Guideline stabilization - -One purpose of these guidelines is to reach decisions on a number of -cross-cutting API and stylistic choices. Discussion and development of -the guidelines will happen primarily on https://internals.rust-lang.org/, -using the Guidelines category. Discussion can also occur on the -[guidelines issue tracker](https://github.com/rust-lang/rust-guidelines). - -Guidelines that are under development or discussion will be marked with the -status **[FIXME]**, with a link to the issue tracker when appropriate. - -Once a concrete guideline is ready to be proposed, it should be filed -as an [FIXME: needs RFC](https://github.com/rust-lang/rfcs). If the RFC is -accepted, the official guidelines will be updated to match, and will -include the tag **[RFC #NNNN]** linking to the RFC document. - -### What's in this document - -This document is broken into four parts: - -* **[Style](style/README.md)** provides a set of rules governing naming conventions, - whitespace, and other stylistic issues. - -* **[Guidelines by Rust feature](features/README.md)** places the focus on each of - Rust's features, starting from expressions and working the way out toward - crates, dispensing guidelines relevant to each. - -* **Topical guidelines and patterns**. The rest of the document proceeds by - cross-cutting topic, starting with - [Ownership and resources](ownership/README.md). - -* **APIs for a changing Rust** - discusses the forward-compatibility hazards, especially those that interact - with the pre-1.0 library stabilization process. - -> **[FIXME]** Add cross-references throughout this document to the tutorial, -> reference manual, and other guides. - -> **[FIXME]** What are some _non_-goals, _non_-principles, or _anti_-patterns that -> we should document? 
diff --git a/src/doc/style/SUMMARY.md b/src/doc/style/SUMMARY.md deleted file mode 100644 index 508ede6c4a..0000000000 --- a/src/doc/style/SUMMARY.md +++ /dev/null @@ -1,50 +0,0 @@ -# Summary - -* [Style](style/README.md) - * [Whitespace](style/whitespace.md) - * [Comments](style/comments.md) - * [Braces, semicolons, commas](style/braces.md) - * [Naming](style/naming/README.md) - * [Ownership variants](style/naming/ownership.md) - * [Containers/wrappers](style/naming/containers.md) - * [Conversions](style/naming/conversions.md) - * [Iterators](style/naming/iterators.md) - * [Imports](style/imports.md) - * [Organization](style/organization.md) -* [Guidelines by Rust feature](features/README.md) - * [Let binding](features/let.md) - * [Pattern matching](features/match.md) - * [Loops](features/loops.md) - * [Functions and methods](features/functions-and-methods/README.md) - * [Input](features/functions-and-methods/input.md) - * [Output](features/functions-and-methods/output.md) - * [For convenience](features/functions-and-methods/convenience.md) - * [Types](features/types/README.md) - * [Conversions](features/types/conversions.md) - * [The newtype pattern](features/types/newtype.md) - * [Traits](features/traits/README.md) - * [For generics](features/traits/generics.md) - * [For objects](features/traits/objects.md) - * [For overloading](features/traits/overloading.md) - * [For extensions](features/traits/extensions.md) - * [For reuse](features/traits/reuse.md) - * [Common traits](features/traits/common.md) - * [Modules](features/modules.md) - * [Crates](features/crates.md) -* [Ownership and resources](ownership/README.md) - * [Constructors](ownership/constructors.md) - * [Builders](ownership/builders.md) - * [Destructors](ownership/destructors.md) - * [RAII](ownership/raii.md) - * [Cells and smart pointers](ownership/cell-smart.md) -* [Errors](errors/README.md) - * [Signaling](errors/signaling.md) - * [Handling](errors/handling.md) - * [Propagation](errors/propagation.md) - * [Ergonomics](errors/ergonomics.md) -* [Safety and guarantees](safety/README.md) - * [Using unsafe](safety/unsafe.md) - * [Library guarantees](safety/lib-guarantees.md) -* [Testing](testing/README.md) - * [Unit testing](testing/unit.md) -* [FFI, platform-specific code](platform.md) diff --git a/src/doc/style/errors/README.md b/src/doc/style/errors/README.md deleted file mode 100644 index 444da26ff8..0000000000 --- a/src/doc/style/errors/README.md +++ /dev/null @@ -1,3 +0,0 @@ -% Errors - -> **[FIXME]** Add some general text here. diff --git a/src/doc/style/errors/ergonomics.md b/src/doc/style/errors/ergonomics.md deleted file mode 100644 index 269f2a2894..0000000000 --- a/src/doc/style/errors/ergonomics.md +++ /dev/null @@ -1,66 +0,0 @@ -% Ergonomic error handling - -Error propagation with raw `Result`s can require tedious matching and -repackaging. This tedium is largely alleviated by the `try!` macro, -and can be completely removed (in some cases) by the "`Result`-`impl`" -pattern. 
- -### The `try!` macro - -Prefer - -```rust,ignore -use std::io::{File, Open, Write, IoError}; - -struct Info { - name: String, - age: i32, - rating: i32 -} - -fn write_info(info: &Info) -> Result<(), IoError> { - let mut file = File::open_mode(&Path::new("my_best_friends.txt"), - Open, Write); - // Early return on error - try!(file.write_line(&format!("name: {}", info.name))); - try!(file.write_line(&format!("age: {}", info.age))); - try!(file.write_line(&format!("rating: {}", info.rating))); - return Ok(()); -} -``` - -over - -```rust,ignore -use std::io::{File, Open, Write, IoError}; - -struct Info { - name: String, - age: i32, - rating: i32 -} - -fn write_info(info: &Info) -> Result<(), IoError> { - let mut file = File::open_mode(&Path::new("my_best_friends.txt"), - Open, Write); - // Early return on error - match file.write_line(&format!("name: {}", info.name)) { - Ok(_) => (), - Err(e) => return Err(e) - } - match file.write_line(&format!("age: {}", info.age)) { - Ok(_) => (), - Err(e) => return Err(e) - } - return file.write_line(&format!("rating: {}", info.rating)); -} -``` - -See -[the `result` module documentation](https://doc.rust-lang.org/stable/std/result/index.html#the-try-macro) -for more details. - -### The `Result`-`impl` pattern [FIXME] - -> **[FIXME]** Document the way that the `io` module uses trait impls -> on `std::io::Result` to painlessly propagate errors. diff --git a/src/doc/style/errors/handling.md b/src/doc/style/errors/handling.md deleted file mode 100644 index 9b8a00d736..0000000000 --- a/src/doc/style/errors/handling.md +++ /dev/null @@ -1,7 +0,0 @@ -% Handling errors - -### Use thread isolation to cope with failure. [FIXME] - -> **[FIXME]** Explain how to isolate threads and detect thread failure for recovery. - -### Consuming `Result` [FIXME] diff --git a/src/doc/style/errors/propagation.md b/src/doc/style/errors/propagation.md deleted file mode 100644 index 0a347cd577..0000000000 --- a/src/doc/style/errors/propagation.md +++ /dev/null @@ -1,8 +0,0 @@ -% Propagation - -> **[FIXME]** We need guidelines on how to layer error information up a stack of -> abstractions. - -### Error interoperation [FIXME] - -> **[FIXME]** Document the `FromError` infrastructure. diff --git a/src/doc/style/errors/signaling.md b/src/doc/style/errors/signaling.md deleted file mode 100644 index 4038ec10b9..0000000000 --- a/src/doc/style/errors/signaling.md +++ /dev/null @@ -1,125 +0,0 @@ -% Signaling errors [RFC #236] - -> The guidelines below were approved by [RFC #236](https://github.com/rust-lang/rfcs/pull/236). - -Errors fall into one of three categories: - -* Catastrophic errors, e.g. out-of-memory. -* Contract violations, e.g. wrong input encoding, index out of bounds. -* Obstructions, e.g. file not found, parse error. - -The basic principle of the convention is that: - -* Catastrophic errors and programming errors (bugs) can and should only be -recovered at a *coarse grain*, i.e. a thread boundary. -* Obstructions preventing an operation should be reported at a maximally *fine -grain* -- to the immediate invoker of the operation. - -## Catastrophic errors - -An error is _catastrophic_ if there is no meaningful way for the current thread to -continue after the error occurs. - -Catastrophic errors are _extremely_ rare, especially outside of `libstd`. - -**Canonical examples**: out of memory, stack overflow. 
- -### For catastrophic errors, panic - -For errors like stack overflow, Rust currently aborts the process, but -could in principle panic, which (in the best case) would allow -reporting and recovery from a supervisory thread. - -## Contract violations - -An API may define a contract that goes beyond the type checking enforced by the -compiler. For example, slices support an indexing operation, with the contract -that the supplied index must be in bounds. - -Contracts can be complex and involve more than a single function invocation. For -example, the `RefCell` type requires that `borrow_mut` not be called until all -existing borrows have been relinquished. - -### For contract violations, panic - -A contract violation is always a bug, and for bugs we follow the Erlang -philosophy of "let it crash": we assume that software *will* have bugs, and we -design coarse-grained thread boundaries to report, and perhaps recover, from these -bugs. - -### Contract design - -One subtle aspect of these guidelines is that the contract for a function is -chosen by an API designer -- and so the designer also determines what counts as -a violation. - -This RFC does not attempt to give hard-and-fast rules for designing -contracts. However, here are some rough guidelines: - -* Prefer expressing contracts through static types whenever possible. - -* It *must* be possible to write code that uses the API without violating the - contract. - -* Contracts are most justified when violations are *inarguably* bugs -- but this - is surprisingly rare. - -* Consider whether the API client could benefit from the contract-checking - logic. The checks may be expensive. Or there may be useful programming - patterns where the client does not want to check inputs before hand, but would - rather attempt the operation and then find out whether the inputs were invalid. - -* When a contract violation is the *only* kind of error a function may encounter - -- i.e., there are no obstructions to its success other than "bad" inputs -- - using `Result` or `Option` instead is especially warranted. Clients can then use - `unwrap` to assert that they have passed valid input, or re-use the error - checking done by the API for their own purposes. - -* When in doubt, use loose contracts and instead return a `Result` or `Option`. - -## Obstructions - -An operation is *obstructed* if it cannot be completed for some reason, even -though the operation's contract has been satisfied. Obstructed operations may -have (documented!) side effects -- they are not required to roll back after -encountering an obstruction. However, they should leave the data structures in -a "coherent" state (satisfying their invariants, continuing to guarantee safety, -etc.). - -Obstructions may involve external conditions (e.g., I/O), or they may involve -aspects of the input that are not covered by the contract. - -**Canonical examples**: file not found, parse error. - -### For obstructions, use `Result` - -The -[`Result` type](https://doc.rust-lang.org/stable/std/result/index.html) -represents either a success (yielding `T`) or failure (yielding `E`). By -returning a `Result`, a function allows its clients to discover and react to -obstructions in a fine-grained way. - -#### What about `Option`? - -The `Option` type should not be used for "obstructed" operations; it -should only be used when a `None` return value could be considered a -"successful" execution of the operation. 
- -This is of course a somewhat subjective question, but a good litmus -test is: would a reasonable client ever ignore the result? The -`Result` type provides a lint that ensures the result is actually -inspected, while `Option` does not, and this difference of behavior -can help when deciding between the two types. - -Another litmus test: can the operation be understood as asking a -question (possibly with sideeffects)? Operations like `pop` on a -vector can be viewed as asking for the contents of the first element, -with the side effect of removing it if it exists -- with an `Option` -return value. - -## Do not provide both `Result` and `panic!` variants. - -An API should not provide both `Result`-producing and `panic`king versions of an -operation. It should provide just the `Result` version, allowing clients to use -`try!` or `unwrap` instead as needed. This is part of the general pattern of -cutting down on redundant variants by instead using method chaining. diff --git a/src/doc/style/features/README.md b/src/doc/style/features/README.md deleted file mode 100644 index 09657503d2..0000000000 --- a/src/doc/style/features/README.md +++ /dev/null @@ -1,9 +0,0 @@ -% Guidelines by language feature - -Rust provides a unique combination of language features, some new and some -old. This section gives guidance on when and how to use Rust's features, and -brings attention to some of the tradeoffs between different features. - -Notably missing from this section is an in-depth discussion of Rust's pointer -types (both built-in and in the library). The topic of pointers is discussed at -length in a [separate section on ownership](../ownership/README.md). diff --git a/src/doc/style/features/crates.md b/src/doc/style/features/crates.md deleted file mode 100644 index 4748b05f17..0000000000 --- a/src/doc/style/features/crates.md +++ /dev/null @@ -1,6 +0,0 @@ -% Crates - -> **[FIXME]** What general guidelines should we provide for crate design? - -> Possible topics: facades; per-crate preludes (to be imported as globs); -> "lib.rs" diff --git a/src/doc/style/features/functions-and-methods/README.md b/src/doc/style/features/functions-and-methods/README.md deleted file mode 100644 index a3559ca3e7..0000000000 --- a/src/doc/style/features/functions-and-methods/README.md +++ /dev/null @@ -1,44 +0,0 @@ -% Functions and methods - -### Prefer methods to functions if there is a clear receiver. **[FIXME: needs RFC]** - -Prefer - -```rust,ignore -impl Foo { - pub fn frob(&self, w: widget) { ... } -} -``` - -over - -```rust,ignore -pub fn frob(foo: &Foo, w: widget) { ... } -``` - -for any operation that is clearly associated with a particular -type. - -Methods have numerous advantages over functions: - -* They do not need to be imported or qualified to be used: all you - need is a value of the appropriate type. -* Their invocation performs autoborrowing (including mutable borrows). -* They make it easy to answer the question "what can I do with a value - of type `T`" (especially when using rustdoc). -* They provide `self` notation, which is more concise and often more - clearly conveys ownership distinctions. - -> **[FIXME]** Revisit these guidelines with -> [UFCS](https://github.com/nick29581/rfcs/blob/ufcs/0000-ufcs.md) and -> conventions developing around it. - - - -### Guidelines for inherent methods. **[FIXME]** - -> **[FIXME]** We need guidelines for when to provide inherent methods on a type, -> versus methods through a trait or functions. 
- -> **NOTE**: Rules for method resolution around inherent methods are in flux, -> which may impact the guidelines. diff --git a/src/doc/style/features/functions-and-methods/convenience.md b/src/doc/style/features/functions-and-methods/convenience.md deleted file mode 100644 index 69fd3772a7..0000000000 --- a/src/doc/style/features/functions-and-methods/convenience.md +++ /dev/null @@ -1,43 +0,0 @@ -% Convenience methods - -### Provide small, coherent sets of convenience methods. **[FIXME: needs RFC]** - -_Convenience methods_ wrap up existing functionality in a more convenient -way. The work done by a convenience method varies widely: - -* _Re-providing functions as methods_. For example, the `std::path::Path` type - provides methods like `stat` on `Path`s that simply invoke the corresponding - function in `std::io::fs`. -* _Skipping through conversions_. For example, the `str` type provides a - `.len()` convenience method which is also expressible as `.as_bytes().len()`. - Sometimes the conversion is more complex: the `str` module also provides - `from_chars`, which encapsulates a simple use of iterators. -* _Encapsulating common arguments_. For example, vectors of `&str`s - provide a `connect` as well as a special case, `concat`, that is expressible - using `connect` with a fixed separator of `""`. -* _Providing more efficient special cases_. The `connect` and `concat` example - also applies here: singling out `concat` as a special case allows for a more - efficient implementation. - - Note, however, that the `connect` method actually detects the special case - internally and invokes `concat`. Usually, it is not necessary to add a public - convenience method just for efficiency gains; there should also be a - _conceptual_ reason to add it, e.g. because it is such a common special case. - -It is tempting to add convenience methods in a one-off, haphazard way as -common use patterns emerge. Avoid this temptation, and instead _design_ small, -coherent sets of convenience methods that are easy to remember: - -* _Small_: Avoid combinatorial explosions of convenience methods. For example, - instead of adding `_str` variants of methods that provide a `str` output, - instead ensure that the normal output type of methods is easily convertible to - `str`. -* _Coherent_: Look for small groups of convenience methods that make sense to - include together. For example, the `Path` API mentioned above includes a small - selection of the most common filesystem operations that take a `Path` - argument. If one convenience method strongly suggests the existence of others, - consider adding the whole group. -* _Memorable_: It is not worth saving a few characters of typing if you have to - look up the name of a convenience method every time you use it. Add - convenience methods with names that are obvious and easy to remember, and add - them for the most common or painful use cases. diff --git a/src/doc/style/features/functions-and-methods/input.md b/src/doc/style/features/functions-and-methods/input.md deleted file mode 100644 index 5b63a45144..0000000000 --- a/src/doc/style/features/functions-and-methods/input.md +++ /dev/null @@ -1,203 +0,0 @@ -% Input to functions and methods - -### Let the client decide when to copy and where to place data. 
[FIXME: needs RFC] - -#### Copying: - -Prefer - -```rust,ignore -fn foo(b: Bar) { - // use b as owned, directly -} -``` - -over - -```rust,ignore -fn foo(b: &Bar) { - let b = b.clone(); - // use b as owned after cloning -} -``` - -If a function requires ownership of a value of unknown type `T`, but does not -otherwise need to make copies, the function should take ownership of the -argument (pass by value `T`) rather than using `.clone()`. That way, the caller -can decide whether to relinquish ownership or to `clone`. - -Similarly, the `Copy` trait bound should only be demanded it when absolutely -needed, not as a way of signaling that copies should be cheap to make. - -#### Placement: - -Prefer - -```rust,ignore -fn foo(b: Bar) -> Bar { ... } -``` - -over - -```rust,ignore -fn foo(b: Box) -> Box { ... } -``` - -for concrete types `Bar` (as opposed to trait objects). This way, the caller can -decide whether to place data on the stack or heap. No overhead is imposed by -letting the caller determine the placement. - -### Minimize assumptions about parameters. [FIXME: needs RFC] - -The fewer assumptions a function makes about its inputs, the more widely usable -it becomes. - -#### Minimizing assumptions through generics: - -Prefer - -```rust,ignore -fn foo>(c: T) { ... } -``` - -over any of - -```rust,ignore -fn foo(c: &[i32]) { ... } -fn foo(c: &Vec) { ... } -fn foo(c: &SomeOtherCollection) { ... } -``` - -if the function only needs to iterate over the data. - -More generally, consider using generics to pinpoint the assumptions a function -needs to make about its arguments. - -On the other hand, generics can make it more difficult to read and understand a -function's signature. Aim for "natural" parameter types that a neither overly -concrete nor overly abstract. See the discussion on -[traits](../traits/README.md) for more guidance. - - -#### Minimizing ownership assumptions: - -Prefer either of - -```rust,ignore -fn foo(b: &Bar) { ... } -fn foo(b: &mut Bar) { ... } -``` - -over - -```rust,ignore -fn foo(b: Bar) { ... } -``` - -That is, prefer borrowing arguments rather than transferring ownership, unless -ownership is actually needed. - -### Prefer compound return types to out-parameters. [FIXME: needs RFC] - -Prefer - -```rust,ignore -fn foo() -> (Bar, Bar) -``` - -over - -```rust,ignore -fn foo(output: &mut Bar) -> Bar -``` - -for returning multiple `Bar` values. - -Compound return types like tuples and structs are efficiently compiled -and do not require heap allocation. If a function needs to return -multiple values, it should do so via one of these types. - -The primary exception: sometimes a function is meant to modify data -that the caller already owns, for example to re-use a buffer: - -```rust,ignore -fn read(&mut self, buf: &mut [u8]) -> std::io::Result -``` - -(From the [Read trait](https://doc.rust-lang.org/stable/std/io/trait.Read.html#tymethod.read).) - -### Consider validating arguments, statically or dynamically. [FIXME: needs RFC] - -_Note: this material is closely related to - [library-level guarantees](../../safety/lib-guarantees.md)._ - -Rust APIs do _not_ generally follow the -[robustness principle](https://en.wikipedia.org/wiki/Robustness_principle): "be -conservative in what you send; be liberal in what you accept". - -Instead, Rust code should _enforce_ the validity of input whenever practical. - -Enforcement can be achieved through the following mechanisms (listed -in order of preference). 
- -#### Static enforcement: - -Choose an argument type that rules out bad inputs. - -For example, prefer - -```rust,ignore -enum FooMode { - Mode1, - Mode2, - Mode3, -} -fn foo(mode: FooMode) { ... } -``` - -over - -```rust,ignore -fn foo(mode2: bool, mode3: bool) { - assert!(!mode2 || !mode3); - ... -} -``` - -Static enforcement usually comes at little run-time cost: it pushes the -costs to the boundaries. It also catches bugs early, during compilation, -rather than through run-time failures. - -On the other hand, some properties are difficult or impossible to -express using types. - -#### Dynamic enforcement: - -Validate the input as it is processed (or ahead of time, if necessary). Dynamic -checking is often easier to implement than static checking, but has several -downsides: - -1. Runtime overhead (unless checking can be done as part of processing the input). -2. Delayed detection of bugs. -3. Introduces failure cases, either via `panic!` or `Result`/`Option` types (see - the [error handling guidelines](../../errors/README.md)), which must then be - dealt with by client code. - -#### Dynamic enforcement with `debug_assert!`: - -Same as dynamic enforcement, but with the possibility of easily turning off -expensive checks for production builds. - -#### Dynamic enforcement with opt-out: - -Same as dynamic enforcement, but adds sibling functions that opt out of the -checking. - -The convention is to mark these opt-out functions with a suffix like -`_unchecked` or by placing them in a `raw` submodule. - -The unchecked functions can be used judiciously in cases where (1) performance -dictates avoiding checks and (2) the client is otherwise confident that the -inputs are valid. - -> **[FIXME]** Should opt-out functions be marked `unsafe`? diff --git a/src/doc/style/features/functions-and-methods/output.md b/src/doc/style/features/functions-and-methods/output.md deleted file mode 100644 index e26eee5336..0000000000 --- a/src/doc/style/features/functions-and-methods/output.md +++ /dev/null @@ -1,56 +0,0 @@ -% Output from functions and methods - -### Don't overpromise. [FIXME] - -> **[FIXME]** Add discussion of overly-specific return types, -> e.g. returning a compound iterator type rather than hiding it behind -> a use of newtype. - -### Let clients choose what to throw away. [FIXME: needs RFC] - -#### Return useful intermediate results: - -Many functions that answer a question also compute interesting related data. If -this data is potentially of interest to the client, consider exposing it in the -API. - -Prefer - -```rust,ignore -struct SearchResult { - found: bool, // item in container? - expected_index: usize // what would the item's index be? -} - -fn binary_search(&self, k: Key) -> SearchResult -``` -or - -```rust,ignore -fn binary_search(&self, k: Key) -> (bool, usize) -``` - -over - -```rust,ignore -fn binary_search(&self, k: Key) -> bool -``` - -#### Yield back ownership: - -Prefer - -```rust,ignore -fn from_utf8_owned(vv: Vec) -> Result> -``` - -over - -```rust,ignore -fn from_utf8_owned(vv: Vec) -> Option -``` - -The `from_utf8_owned` function gains ownership of a vector. In the successful -case, the function consumes its input, returning an owned string without -allocating or copying. In the unsuccessful case, however, the function returns -back ownership of the original slice. 
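In the current standard library the same idea appears as `String::from_utf8`, whose error type hands the original allocation back to the caller; a minimal sketch:

```rust
fn main() {
    let bytes = vec![0xff, 0xfe]; // not valid UTF-8
    match String::from_utf8(bytes) {
        Ok(s) => println!("valid: {}", s),
        Err(e) => {
            // The error still owns the original Vec<u8>; nothing was lost or copied.
            let recovered: Vec<u8> = e.into_bytes();
            println!("invalid UTF-8, got {} bytes back", recovered.len());
        }
    }
}
```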
diff --git a/src/doc/style/features/let.md b/src/doc/style/features/let.md deleted file mode 100644 index ba9787b45f..0000000000 --- a/src/doc/style/features/let.md +++ /dev/null @@ -1,103 +0,0 @@ -% Let binding - -### Always separately bind RAII guards. [FIXME: needs RFC] - -Prefer - -```rust,ignore -fn use_mutex(m: sync::mutex::Mutex) { - let guard = m.lock(); - do_work(guard); - drop(guard); // unlock the lock - // do other work -} -``` - -over - -```rust,ignore -fn use_mutex(m: sync::mutex::Mutex) { - do_work(m.lock()); - // do other work -} -``` - -As explained in the [RAII guide](../ownership/raii.md), RAII guards are values -that represent ownership of some resource and whose destructor releases the -resource. Because the lifetime of guards are significant, they should always be -explicitly `let`-bound to make the lifetime clear. Consider using an explicit -`drop` to release the resource early. - -### Prefer conditional expressions to deferred initialization. [FIXME: needs RFC] - -Prefer - -```rust,ignore -let foo = match bar { - Baz => 0, - Quux => 1 -}; -``` - -over - -```rust,ignore -let foo; -match bar { - Baz => { - foo = 0; - } - Quux => { - foo = 1; - } -} -``` - -unless the conditions for initialization are too complex to fit into a simple -conditional expression. - -### Use type annotations for clarification; prefer explicit generics when inference fails. [FIXME: needs RFC] - -Prefer - -```rust,ignore -let v = s.iter().map(|x| x * 2) - .collect::>(); -``` - -over - -```rust,ignore -let v: Vec<_> = s.iter().map(|x| x * 2) - .collect(); -``` - -When the type of a value might be unclear to the _reader_ of the code, consider -explicitly annotating it in a `let`. - -On the other hand, when the type is unclear to the _compiler_, prefer to specify -the type by explicit generics instantiation, which is usually more clear. - -### Shadowing [FIXME] - -> **[FIXME]** Repeatedly shadowing a binding is somewhat common in Rust code. We -> need to articulate a guideline on when it is appropriate/useful and when not. - -### Prefer immutable bindings. [FIXME: needs RFC] - -Use `mut` bindings to signal the span during which a value is mutated: - -```rust,ignore -let mut v = Vec::new(); -// push things onto v -let v = v; -// use v immutably henceforth -``` - -### Prefer to bind all `struct` or tuple fields. [FIXME: needs RFC] - -When consuming a `struct` or tuple via a `let`, bind all of the fields rather -than using `..` to elide the ones you don't need. The benefit is that when -fields are added, the compiler will pinpoint all of the places where that type -of value was consumed, which will often need to be adjusted to take the new -field properly into account. diff --git a/src/doc/style/features/loops.md b/src/doc/style/features/loops.md deleted file mode 100644 index b144825f98..0000000000 --- a/src/doc/style/features/loops.md +++ /dev/null @@ -1,13 +0,0 @@ -% Loops - -### Prefer `for` to `while`. [FIXME: needs RFC] - -A `for` loop is preferable to a `while` loop, unless the loop counts in a -non-uniform way (making it difficult to express using `for`). - -### Guidelines for `loop`. [FIXME] - -> **[FIXME]** When is `loop` recommended? 
Some possibilities: -> * For optimistic retry algorithms -> * For servers -> * To avoid mutating local variables sometimes needed to fit `while` diff --git a/src/doc/style/features/match.md b/src/doc/style/features/match.md deleted file mode 100644 index 0d5a1184a0..0000000000 --- a/src/doc/style/features/match.md +++ /dev/null @@ -1,26 +0,0 @@ -% Pattern matching - -### Dereference `match` targets when possible. [FIXME: needs RFC] - -Prefer - -~~~~ignore -match *foo { - X(...) => ... - Y(...) => ... -} -~~~~ - -over - -~~~~ignore -match foo { - box X(...) => ... - box Y(...) => ... -} -~~~~ - - - - - diff --git a/src/doc/style/features/modules.md b/src/doc/style/features/modules.md deleted file mode 100644 index 995c5fda8a..0000000000 --- a/src/doc/style/features/modules.md +++ /dev/null @@ -1,133 +0,0 @@ -% Modules - -> **[FIXME]** What general guidelines should we provide for module design? - -> We should discuss visibility, nesting, `mod.rs`, and any interesting patterns -> around modules. - -### Headers [FIXME: needs RFC] - -Organize module headers as follows: - 1. [Imports](../style/imports.md). - 1. `mod` declarations. - 1. `pub mod` declarations. - -### Avoid `path` directives. [FIXME: needs RFC] - -Avoid using `#[path="..."]` directives; make the file system and -module hierarchy match, instead. - -### Use the module hierarchy to organize APIs into coherent sections. [FIXME] - -> **[FIXME]** Flesh this out with examples; explain what a "coherent -> section" is with examples. -> -> The module hierarchy defines both the public and internal API of your module. -> Breaking related functionality into submodules makes it understandable to both -> users and contributors to the module. - -### Place modules in their own file. [FIXME: needs RFC] - -> **[FIXME]** -> - "<100 lines" is arbitrary, but it's a clearer recommendation -> than "~1 page" or similar suggestions that vary by screen size, etc. - -For all except very short modules (<100 lines) and [tests](../testing/README.md), -place the module `foo` in a separate file, as in: - -```rust,ignore -pub mod foo; - -// in foo.rs or foo/mod.rs -pub fn bar() { println!("..."); } -/* ... */ -``` - -rather than declaring it inline: - -```rust,ignore -pub mod foo { - pub fn bar() { println!("..."); } - /* ... */ -} -``` - -#### Use subdirectories for modules with children. [FIXME: needs RFC] - -For modules that themselves have submodules, place the module in a separate -directory (e.g., `bar/mod.rs` for a module `bar`) rather than the same directory. - -Note the structure of -[`std::io`](https://doc.rust-lang.org/std/io/). Many of the submodules lack -children, like -[`io::fs`](https://doc.rust-lang.org/std/io/fs/) -and -[`io::stdio`](https://doc.rust-lang.org/std/io/stdio/). -On the other hand, -[`io::net`](https://doc.rust-lang.org/std/io/net/) -contains submodules, so it lives in a separate directory: - -```text -io/mod.rs - io/extensions.rs - io/fs.rs - io/net/mod.rs - io/net/addrinfo.rs - io/net/ip.rs - io/net/tcp.rs - io/net/udp.rs - io/net/unix.rs - io/pipe.rs - ... -``` - -While it is possible to define all of `io` within a single directory, -mirroring the module hierarchy in the directory structure makes -submodules of `io::net` easier to find. - -### Consider top-level definitions or reexports. 
[FIXME: needs RFC] - -For modules with submodules, -define or [reexport](https://doc.rust-lang.org/std/io/#reexports) commonly used -definitions at the top level: - -* Functionality relevant to the module itself or to many of its - children should be defined in `mod.rs`. -* Functionality specific to a submodule should live in that - submodule. Reexport at the top level for the most important or - common definitions. - -For example, -[`IoError`](https://doc.rust-lang.org/std/io/struct.IoError.html) -is defined in `io/mod.rs`, since it pertains to the entirety of `io`, -while -[`TcpStream`](https://doc.rust-lang.org/std/io/net/tcp/struct.TcpStream.html) -is defined in `io/net/tcp.rs` and reexported in the `io` module. - -### Use internal module hierarchies for organization. [FIXME: needs RFC] - -> **[FIXME]** -> - Referencing internal modules from the standard library is subject to -> becoming outdated. - -Internal module hierarchies (i.e., private submodules) may be used to -hide implementation details that are not part of the module's API. - -For example, in [`std::io`](https://doc.rust-lang.org/std/io/), `mod mem` -provides implementations for -[`BufReader`](https://doc.rust-lang.org/std/io/struct.BufReader.html) -and -[`BufWriter`](https://doc.rust-lang.org/std/io/struct.BufWriter.html), -but these are re-exported in `io/mod.rs` at the top level of the module: - -```rust,ignore -// libstd/io/mod.rs - -pub use self::mem::{MemReader, BufReader, MemWriter, BufWriter}; -/* ... */ -mod mem; -``` - -This hides the detail that there even exists a `mod mem` in `io`, and -helps keep code organized while offering freedom to change the -implementation. diff --git a/src/doc/style/features/traits/README.md b/src/doc/style/features/traits/README.md deleted file mode 100644 index 1893db2446..0000000000 --- a/src/doc/style/features/traits/README.md +++ /dev/null @@ -1,22 +0,0 @@ -% Traits - -Traits are probably Rust's most complex feature, supporting a wide range of use -cases and design tradeoffs. Patterns of trait usage are still emerging. - -### Know whether a trait will be used as an object. [FIXME: needs RFC] - -Trait objects have some [significant limitations](objects.md): methods -invoked through a trait object cannot use generics, and cannot use -`Self` except in receiver position. - -When designing a trait, decide early on whether the trait will be used -as an [object](objects.md) or as a [bound on generics](generics.md); -the tradeoffs are discussed in each of the linked sections. - -If a trait is meant to be used as an object, its methods should take -and return trait objects rather than use generics. - - -### Default methods [FIXME] - -> **[FIXME]** Guidelines for default methods. diff --git a/src/doc/style/features/traits/common.md b/src/doc/style/features/traits/common.md deleted file mode 100644 index e8699c7522..0000000000 --- a/src/doc/style/features/traits/common.md +++ /dev/null @@ -1,71 +0,0 @@ -% Common traits - -### Eagerly implement common traits. [FIXME: needs RFC] - -Rust's trait system does not allow _orphans_: roughly, every `impl` must live -either in the crate that defines the trait or the implementing -type. Consequently, crates that define new types should eagerly implement all -applicable, common traits. - -To see why, consider the following situation: - -* Crate `std` defines trait `Debug`. -* Crate `url` defines type `Url`, without implementing `Debug`. 
-* Crate `webapp` imports from both `std` and `url`, - -There is no way for `webapp` to add `Debug` to `url`, since it defines neither. -(Note: the newtype pattern can provide an efficient, but inconvenient -workaround; see [newtype for views](../types/newtype.md)) - -The most important common traits to implement from `std` are: - -```text -Clone, Debug, Hash, Eq -``` - -#### When safe, derive or otherwise implement `Send` and `Share`. [FIXME] - -> **[FIXME]**. This guideline is in flux while the "opt-in" nature of -> built-in traits is being decided. See https://github.com/rust-lang/rfcs/pull/127 - -### Prefer to derive, rather than implement. [FIXME: needs RFC] - -Deriving saves implementation effort, makes correctness trivial, and -automatically adapts to upstream changes. - -### Do not overload operators in surprising ways. [FIXME: needs RFC] - -Operators with built in syntax (`*`, `|`, and so on) can be provided for a type -by implementing the traits in `core::ops`. These operators come with strong -expectations: implement `Mul` only for an operation that bears some resemblance -to multiplication (and shares the expected properties, e.g. associativity), and -so on for the other traits. - -### The `Drop` trait - -The `Drop` trait is treated specially by the compiler as a way of -associating destructors with types. See -[the section on destructors](../../ownership/destructors.md) for -guidance. - -### The `Deref`/`DerefMut` traits - -#### Use `Deref`/`DerefMut` only for smart pointers. [FIXME: needs RFC] - -The `Deref` traits are used implicitly by the compiler in many circumstances, -and interact with method resolution. The relevant rules are designed -specifically to accommodate smart pointers, and so the traits should be used -only for that purpose. - -#### Do not fail within a `Deref`/`DerefMut` implementation. [FIXME: needs RFC] - -Because the `Deref` traits are invoked implicitly by the compiler in sometimes -subtle ways, failure during dereferencing can be extremely confusing. If a -dereference might not succeed, target the `Deref` trait as a `Result` or -`Option` type instead. - -#### Avoid inherent methods when implementing `Deref`/`DerefMut` [FIXME: needs RFC] - -The rules around method resolution and `Deref` are in flux, but inherent methods -on a type implementing `Deref` are likely to shadow any methods of the referent -with the same name. diff --git a/src/doc/style/features/traits/extensions.md b/src/doc/style/features/traits/extensions.md deleted file mode 100644 index fc3a03c01f..0000000000 --- a/src/doc/style/features/traits/extensions.md +++ /dev/null @@ -1,7 +0,0 @@ -% Using traits to add extension methods - -> **[FIXME]** Elaborate. - -### Consider using default methods rather than extension traits **[FIXME]** - -> **[FIXME]** Elaborate. diff --git a/src/doc/style/features/traits/generics.md b/src/doc/style/features/traits/generics.md deleted file mode 100644 index f9dac1272c..0000000000 --- a/src/doc/style/features/traits/generics.md +++ /dev/null @@ -1,67 +0,0 @@ -% Using traits for bounds on generics - -The most widespread use of traits is for writing generic functions or types. For -example, the following signature describes a function for consuming any iterator -yielding items of type `A` to produce a collection of `A`: - -```rust,ignore -fn from_iter>(iterator: T) -> SomeCollection -``` - -Here, the `Iterator` trait specifies an interface that a type `T` must -explicitly implement to be used by this generic function. - -**Pros**: - -* _Reusability_. 
Generic functions can be applied to an open-ended collection of - types, while giving a clear contract for the functionality those types must - provide. -* _Static dispatch and optimization_. Each use of a generic function is - specialized ("monomorphized") to the particular types implementing the trait - bounds, which means that (1) invocations of trait methods are static, direct - calls to the implementation and (2) the compiler can inline and otherwise - optimize these calls. -* _Inline layout_. If a `struct` and `enum` type is generic over some type - parameter `T`, values of type `T` will be laid out _inline_ in the - `struct`/`enum`, without any indirection. -* _Inference_. Since the type parameters to generic functions can usually be - inferred, generic functions can help cut down on verbosity in code where - explicit conversions or other method calls would usually be necessary. See the - overloading/implicits use case below. -* _Precise types_. Because generics give a _name_ to the specific type - implementing a trait, it is possible to be precise about places where that - exact type is required or produced. For example, a function - - ```rust,ignore - fn binary(x: T, y: T) -> T - ``` - - is guaranteed to consume and produce elements of exactly the same type `T`; it - cannot be invoked with parameters of different types that both implement - `Trait`. - -**Cons**: - -* _Code size_. Specializing generic functions means that the function body is - duplicated. The increase in code size must be weighed against the performance - benefits of static dispatch. -* _Homogeneous types_. This is the other side of the "precise types" coin: if - `T` is a type parameter, it stands for a _single_ actual type. So for example - a `Vec` contains elements of a single concrete type (and, indeed, the - vector representation is specialized to lay these out in line). Sometimes - heterogeneous collections are useful; see - trait objects below. -* _Signature verbosity_. Heavy use of generics can bloat function signatures. - **[Ed. note]** This problem may be mitigated by some language improvements; stay tuned. - -### Favor widespread traits. **[FIXME: needs RFC]** - -Generic types are a form of abstraction, which entails a mental indirection: if -a function takes an argument of type `T` bounded by `Trait`, clients must first -think about the concrete types that implement `Trait` to understand how and when -the function is callable. - -To keep the cost of abstraction low, favor widely-known traits. Whenever -possible, implement and use traits provided as part of the standard library. Do -not introduce new traits for generics lightly; wait until there are a wide range -of types that can implement the type. diff --git a/src/doc/style/features/traits/objects.md b/src/doc/style/features/traits/objects.md deleted file mode 100644 index 34712ed1ae..0000000000 --- a/src/doc/style/features/traits/objects.md +++ /dev/null @@ -1,49 +0,0 @@ -% Using trait objects - -> **[FIXME]** What are uses of trait objects other than heterogeneous collections? - -Trait objects are useful primarily when _heterogeneous_ collections of objects -need to be treated uniformly; it is the closest that Rust comes to -object-oriented programming. - -```rust,ignore -struct Frame { ... } -struct Button { ... } -struct Label { ... } - -trait Widget { ... } - -impl Widget for Frame { ... } -impl Widget for Button { ... } -impl Widget for Label { ... } - -impl Frame { - fn new(contents: &[Box]) -> Frame { - ... 
- } -} - -fn make_gui() -> Box { - let b: Box = box Button::new(...); - let l: Box = box Label::new(...); - - box Frame::new([b, l]) as Box -} -``` - -By using trait objects, we can set up a GUI framework with a `Frame` widget that -contains a heterogeneous collection of children widgets. - -**Pros**: - -* _Heterogeneity_. When you need it, you really need it. -* _Code size_. Unlike generics, trait objects do not generate specialized - (monomorphized) versions of code, which can greatly reduce code size. - -**Cons**: - -* _No generic methods_. Trait objects cannot currently provide generic methods. -* _Dynamic dispatch and fat pointers_. Trait objects inherently involve - indirection and vtable dispatch, which can carry a performance penalty. -* _No Self_. Except for the method receiver argument, methods on trait objects - cannot use the `Self` type. diff --git a/src/doc/style/features/traits/overloading.md b/src/doc/style/features/traits/overloading.md deleted file mode 100644 index d7482c9619..0000000000 --- a/src/doc/style/features/traits/overloading.md +++ /dev/null @@ -1,7 +0,0 @@ -% Using traits for overloading - -> **[FIXME]** Elaborate. - -> **[FIXME]** We need to decide on guidelines for this use case. There are a few -> patterns emerging in current Rust code, but it's not clear how widespread they -> should be. diff --git a/src/doc/style/features/traits/reuse.md b/src/doc/style/features/traits/reuse.md deleted file mode 100644 index feedd3937f..0000000000 --- a/src/doc/style/features/traits/reuse.md +++ /dev/null @@ -1,30 +0,0 @@ -% Using traits to share implementations - -> **[FIXME]** Elaborate. - -> **[FIXME]** We probably want to discourage this, at least when used in a way -> that is publicly exposed. - -Traits that provide default implementations for function can provide code reuse -across types. For example, a `print` method can be defined across multiple -types as follows: - -``` Rust -trait Printable { - // Default method implementation - fn print(&self) { println!("{:?}", *self) } -} - -impl Printable for i32 {} - -impl Printable for String { - fn print(&self) { println!("{}", *self) } -} - -impl Printable for bool {} - -impl Printable for f32 {} -``` - -This allows the implementation of `print` to be shared across types, yet -overridden where needed, as seen in the `impl` for `String`. diff --git a/src/doc/style/features/types/README.md b/src/doc/style/features/types/README.md deleted file mode 100644 index d3b95d8a6e..0000000000 --- a/src/doc/style/features/types/README.md +++ /dev/null @@ -1,68 +0,0 @@ -% Data types - -### Use custom types to imbue meaning; do not abuse `bool`, `Option` or other core types. **[FIXME: needs RFC]** - -Prefer - -```rust,ignore -let w = Widget::new(Small, Round) -``` - -over - -```rust,ignore -let w = Widget::new(true, false) -``` - -Core types like `bool`, `u8` and `Option` have many possible interpretations. - -Use custom types (whether `enum`s, `struct`, or tuples) to convey -interpretation and invariants. In the above example, -it is not immediately clear what `true` and `false` are conveying without -looking up the argument names, but `Small` and `Round` are more suggestive. - -Using custom types makes it easier to expand the -options later on, for example by adding an `ExtraLarge` variant. - -See [the newtype pattern](newtype.md) for a no-cost way to wrap -existing types with a distinguished name. - -### Prefer private fields, except for passive data. 
**[FIXME: needs RFC]** - -Making a field public is a strong commitment: it pins down a representation -choice, _and_ prevents the type from providing any validation or maintaining any -invariants on the contents of the field, since clients can mutate it arbitrarily. - -Public fields are most appropriate for `struct` types in the C spirit: compound, -passive data structures. Otherwise, consider providing getter/setter methods -and hiding fields instead. - -> **[FIXME]** Cross-reference validation for function arguments. - -### Use custom `enum`s for alternatives, `bitflags` for C-style flags. **[FIXME: needs RFC]** - -Rust supports `enum` types with "custom discriminants": - -~~~~ -enum Color { - Red = 0xff0000, - Green = 0x00ff00, - Blue = 0x0000ff -} -~~~~ - -Custom discriminants are useful when an `enum` type needs to be serialized to an -integer value compatibly with some other system/language. They support -"typesafe" APIs: by taking a `Color`, rather than an integer, a function is -guaranteed to get well-formed inputs, even if it later views those inputs as -integers. - -An `enum` allows an API to request exactly one choice from among many. Sometimes -an API's input is instead the presence or absence of a set of flags. In C code, -this is often done by having each flag correspond to a particular bit, allowing -a single integer to represent, say, 32 or 64 flags. Rust's `std::bitflags` -module provides a typesafe way for doing so. - -### Phantom types. [FIXME] - -> **[FIXME]** Add some material on phantom types (https://blog.mozilla.org/research/2014/06/23/static-checking-of-units-in-servo/) diff --git a/src/doc/style/features/types/conversions.md b/src/doc/style/features/types/conversions.md deleted file mode 100644 index f0f230f57e..0000000000 --- a/src/doc/style/features/types/conversions.md +++ /dev/null @@ -1,22 +0,0 @@ -% Conversions between types - -### Associate conversions with the most specific type involved. **[FIXME: needs RFC]** - -When in doubt, prefer `to_`/`as_`/`into_` to `from_`, because they are -more ergonomic to use (and can be chained with other methods). - -For many conversions between two types, one of the types is clearly more -"specific": it provides some additional invariant or interpretation that is not -present in the other type. For example, `str` is more specific than `&[u8]`, -since it is a utf-8 encoded sequence of bytes. - -Conversions should live with the more specific of the involved types. Thus, -`str` provides both the `as_bytes` method and the `from_utf8` constructor for -converting to and from `&[u8]` values. Besides being intuitive, this convention -avoids polluting concrete types like `&[u8]` with endless conversion methods. - -### Explicitly mark lossy conversions, or do not label them as conversions. **[FIXME: needs RFC]** - -If a function's name implies that it is a conversion (prefix `from_`, `as_`, -`to_` or `into_`), but the function loses information, add a suffix `_lossy` or -otherwise indicate the lossyness. Consider avoiding the conversion name prefix. diff --git a/src/doc/style/features/types/newtype.md b/src/doc/style/features/types/newtype.md deleted file mode 100644 index 9646e3e82a..0000000000 --- a/src/doc/style/features/types/newtype.md +++ /dev/null @@ -1,69 +0,0 @@ -% The newtype pattern - -A "newtype" is a tuple or `struct` with a single field. The terminology is borrowed from Haskell. 
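As a minimal sketch (the `Meters` name is purely illustrative):

```rust
struct Meters(f64); // a newtype: a tuple struct with exactly one field

fn main() {
    let height = Meters(1.85);
    let Meters(raw) = height; // recover the inner value by destructuring
    println!("{} m", raw);
}
```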
- -Newtypes are a zero-cost abstraction: they introduce a new, distinct name for an -existing type, with no runtime overhead when converting between the two types. - -### Use newtypes to provide static distinctions. [FIXME: needs RFC] - -Newtypes can statically distinguish between different interpretations of an -underlying type. - -For example, a `f64` value might be used to represent a quantity in miles or in -kilometers. Using newtypes, we can keep track of the intended interpretation: - -```rust,ignore -struct Miles(pub f64); -struct Kilometers(pub f64); - -impl Miles { - fn as_kilometers(&self) -> Kilometers { ... } -} -impl Kilometers { - fn as_miles(&self) -> Miles { ... } -} -``` - -Once we have separated these two types, we can statically ensure that we do not -confuse them. For example, the function - -```rust,ignore -fn are_we_there_yet(distance_travelled: Miles) -> bool { ... } -``` - -cannot accidentally be called with a `Kilometers` value. The compiler will -remind us to perform the conversion, thus averting certain -[catastrophic bugs](http://en.wikipedia.org/wiki/Mars_Climate_Orbiter). - -### Use newtypes with private fields for hiding. [FIXME: needs RFC] - -A newtype can be used to hide representation details while making precise -promises to the client. - -For example, consider a function `my_transform` that returns a compound iterator -type `Enumerate>>`. We wish to hide this type from the -client, so that the client's view of the return type is roughly `Iterator<(usize, -T)>`. We can do so using the newtype pattern: - -```rust,ignore -struct MyTransformResult(Enumerate>>); -impl Iterator<(usize, T)> for MyTransformResult { ... } - -fn my_transform>(iter: Iter) -> MyTransformResult { - ... -} -``` - -Aside from simplifying the signature, this use of newtypes allows us to make a -expose and promise less to the client. The client does not know _how_ the result -iterator is constructed or represented, which means the representation can -change in the future without breaking client code. - -> **[FIXME]** Interaction with auto-deref. - -### Use newtypes to provide cost-free _views_ of another type. **[FIXME]** - -> **[FIXME]** Describe the pattern of using newtypes to provide a new set of -> inherent or trait methods, providing a different perspective on the underlying -> type. diff --git a/src/doc/style/ownership/README.md b/src/doc/style/ownership/README.md deleted file mode 100644 index 11bdb03a3a..0000000000 --- a/src/doc/style/ownership/README.md +++ /dev/null @@ -1,3 +0,0 @@ -% Ownership and resource management - -> **[FIXME]** Add general remarks about ownership/resources here. diff --git a/src/doc/style/ownership/builders.md b/src/doc/style/ownership/builders.md deleted file mode 100644 index 3422591233..0000000000 --- a/src/doc/style/ownership/builders.md +++ /dev/null @@ -1,176 +0,0 @@ -% The builder pattern - -Some data structures are complicated to construct, due to their construction needing: - -* a large number of inputs -* compound data (e.g. slices) -* optional configuration data -* choice between several flavors - -which can easily lead to a large number of distinct constructors with -many arguments each. - -If `T` is such a data structure, consider introducing a `T` _builder_: - -1. Introduce a separate data type `TBuilder` for incrementally configuring a `T` - value. When possible, choose a better name: e.g. `Command` is the builder for - `Process`. -2. The builder constructor should take as parameters only the data _required_ to - make a `T`. -3. 
The builder should offer a suite of convenient methods for configuration, - including setting up compound inputs (like slices) incrementally. - These methods should return `self` to allow chaining. -4. The builder should provide one or more "_terminal_" methods for actually building a `T`. - -The builder pattern is especially appropriate when building a `T` involves side -effects, such as spawning a thread or launching a process. - -In Rust, there are two variants of the builder pattern, differing in the -treatment of ownership, as described below. - -### Non-consuming builders (preferred): - -In some cases, constructing the final `T` does not require the builder itself to -be consumed. The follow variant on -[`std::process::Command`](https://doc.rust-lang.org/stable/std/process/struct.Command.html) -is one example: - -```rust,ignore -// NOTE: the actual Command API does not use owned Strings; -// this is a simplified version. - -pub struct Command { - program: String, - args: Vec, - cwd: Option, - // etc -} - -impl Command { - pub fn new(program: String) -> Command { - Command { - program: program, - args: Vec::new(), - cwd: None, - } - } - - /// Add an argument to pass to the program. - pub fn arg<'a>(&'a mut self, arg: String) -> &'a mut Command { - self.args.push(arg); - self - } - - /// Add multiple arguments to pass to the program. - pub fn args<'a>(&'a mut self, args: &[String]) - -> &'a mut Command { - self.args.push_all(args); - self - } - - /// Set the working directory for the child process. - pub fn cwd<'a>(&'a mut self, dir: String) -> &'a mut Command { - self.cwd = Some(dir); - self - } - - /// Executes the command as a child process, which is returned. - pub fn spawn(&self) -> std::io::Result { - ... - } -} -``` - -Note that the `spawn` method, which actually uses the builder configuration to -spawn a process, takes the builder by immutable reference. This is possible -because spawning the process does not require ownership of the configuration -data. - -Because the terminal `spawn` method only needs a reference, the configuration -methods take and return a mutable borrow of `self`. - -#### The benefit - -By using borrows throughout, `Command` can be used conveniently for both -one-liner and more complex constructions: - -```rust,ignore -// One-liners -Command::new("/bin/cat").arg("file.txt").spawn(); - -// Complex configuration -let mut cmd = Command::new("/bin/ls"); -cmd.arg("."); - -if size_sorted { - cmd.arg("-S"); -} - -cmd.spawn(); -``` - -### Consuming builders: - -Sometimes builders must transfer ownership when constructing the final type -`T`, meaning that the terminal methods must take `self` rather than `&self`: - -```rust,ignore -// A simplified excerpt from std::thread::Builder - -impl ThreadBuilder { - /// Name the thread-to-be. Currently the name is used for identification - /// only in failure messages. - pub fn named(mut self, name: String) -> ThreadBuilder { - self.name = Some(name); - self - } - - /// Redirect thread-local stdout. - pub fn stdout(mut self, stdout: Box) -> ThreadBuilder { - self.stdout = Some(stdout); - // ^~~~~~ this is owned and cannot be cloned/re-used - self - } - - /// Creates and executes a new child thread. - pub fn spawn(self, f: proc():Send) { - // consume self - ... - } -} -``` - -Here, the `stdout` configuration involves passing ownership of a `Writer`, -which must be transferred to the thread upon construction (in `spawn`). 
- -When the terminal methods of the builder require ownership, there is a basic tradeoff: - -* If the other builder methods take/return a mutable borrow, the complex - configuration case will work well, but one-liner configuration becomes - _impossible_. - -* If the other builder methods take/return an owned `self`, one-liners - continue to work well but complex configuration is less convenient. - -Under the rubric of making easy things easy and hard things possible, _all_ -builder methods for a consuming builder should take and returned an owned -`self`. Then client code works as follows: - -```rust,ignore -// One-liners -ThreadBuilder::new().named("my_thread").spawn(proc() { ... }); - -// Complex configuration -let mut thread = ThreadBuilder::new(); -thread = thread.named("my_thread_2"); // must re-assign to retain ownership - -if reroute { - thread = thread.stdout(mywriter); -} - -thread.spawn(proc() { ... }); -``` - -One-liners work as before, because ownership is threaded through each of the -builder methods until being consumed by `spawn`. Complex configuration, -however, is more verbose: it requires re-assigning the builder at each step. diff --git a/src/doc/style/ownership/cell-smart.md b/src/doc/style/ownership/cell-smart.md deleted file mode 100644 index cd027cc4aa..0000000000 --- a/src/doc/style/ownership/cell-smart.md +++ /dev/null @@ -1,4 +0,0 @@ -% Cells and smart pointers - -> **[FIXME]** Add guidelines about when to use Cell, RefCell, Rc and -> Arc (and how to use them together). diff --git a/src/doc/style/ownership/constructors.md b/src/doc/style/ownership/constructors.md deleted file mode 100644 index 51fc74ac11..0000000000 --- a/src/doc/style/ownership/constructors.md +++ /dev/null @@ -1,62 +0,0 @@ -% Constructors - -### Define constructors as static, inherent methods. [FIXME: needs RFC] - -In Rust, "constructors" are just a convention: - -```rust,ignore -impl Vec { - pub fn new() -> Vec { ... } -} -``` - -Constructors are static (no `self`) inherent methods for the type that they -construct. Combined with the practice of -[fully importing type names](../style/imports.md), this convention leads to -informative but concise construction: - -```rust,ignore -use vec::Vec; - -// construct a new vector -let mut v = Vec::new(); -``` - -This convention also applied to conversion constructors (prefix `from` rather -than `new`). - -### Provide constructors for passive `struct`s with defaults. [FIXME: needs RFC] - -Given the `struct` - -```rust,ignore -pub struct Config { - pub color: Color, - pub size: Size, - pub shape: Shape, -} -``` - -provide a constructor if there are sensible defaults: - -```rust,ignore -impl Config { - pub fn new() -> Config { - Config { - color: Brown, - size: Medium, - shape: Square, - } - } -} -``` - -which then allows clients to concisely override using `struct` update syntax: - -```rust,ignore -Config { color: Red, .. Config::new() }; -``` - -See the [guideline for field privacy](../features/types/README.md) for -discussion on when to create such "passive" `struct`s with public -fields. diff --git a/src/doc/style/ownership/destructors.md b/src/doc/style/ownership/destructors.md deleted file mode 100644 index 1cfcd78d20..0000000000 --- a/src/doc/style/ownership/destructors.md +++ /dev/null @@ -1,22 +0,0 @@ -% Destructors - -Unlike constructors, destructors in Rust have a special status: they are added -by implementing `Drop` for a type, and they are automatically invoked as values -go out of scope. - -> **[FIXME]** This section needs to be expanded. 
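As a minimal sketch of the mechanism (the `Connection` type is invented for illustration):

```rust
struct Connection {
    id: u32,
}

impl Drop for Connection {
    // Runs automatically when a Connection goes out of scope.
    fn drop(&mut self) {
        println!("closing connection {}", self.id);
    }
}

fn main() {
    let _conn = Connection { id: 7 };
    println!("doing work");
} // `_conn` goes out of scope here and its destructor runs
```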
- -### Destructors should not fail. [FIXME: needs RFC] - -Destructors are executed on thread failure, and in that context a failing -destructor causes the program to abort. - -Instead of failing in a destructor, provide a separate method for checking for -clean teardown, e.g. a `close` method, that returns a `Result` to signal -problems. - -### Destructors should not block. [FIXME: needs RFC] - -Similarly, destructors should not invoke blocking operations, which can make -debugging much more difficult. Again, consider providing a separate method for -preparing for an infallible, nonblocking teardown. diff --git a/src/doc/style/ownership/raii.md b/src/doc/style/ownership/raii.md deleted file mode 100644 index 244e8096a1..0000000000 --- a/src/doc/style/ownership/raii.md +++ /dev/null @@ -1,12 +0,0 @@ -% RAII - -Resource Acquisition is Initialization - -> **[FIXME]** Explain the RAII pattern and give best practices. - -### Whenever possible, tie resource access to guard scopes [FIXME] - -> **[FIXME]** Example: Mutex guards guarantee that access to the -> protected resource only happens when the guard is in scope. - -`must_use` diff --git a/src/doc/style/platform.md b/src/doc/style/platform.md deleted file mode 100644 index d29d060b69..0000000000 --- a/src/doc/style/platform.md +++ /dev/null @@ -1,7 +0,0 @@ -% FFI and platform-specific code **[FIXME]** - -> **[FIXME]** Not sure where this should live. - -When writing cross-platform code, group platform-specific code into a -module called `platform`. Avoid `#[cfg]` directives outside this -`platform` module. diff --git a/src/doc/style/safety/README.md b/src/doc/style/safety/README.md deleted file mode 100644 index 1ac6e704d2..0000000000 --- a/src/doc/style/safety/README.md +++ /dev/null @@ -1,19 +0,0 @@ -% Safety and guarantees - -> **[FIXME]** Is there a better phrase than "strong guarantees" that encompasses -> both e.g. memory safety and e.g. data structure invariants? - -A _guarantee_ is a property that holds no matter what client code does, unless -the client explicitly opts out: - -* Rust guarantees memory safety and data-race freedom, with `unsafe` - blocks as an opt-out mechanism. - -* APIs in Rust often provide their own guarantees. For example, `std::str` -guarantees that its underlying buffer is valid utf-8. The `std::path::Path` type -guarantees no interior nulls. Both strings and paths provide `unsafe` mechanisms -for opting out of these guarantees (and thereby avoiding runtime checks). - -Thinking about guarantees is an essential part of writing good Rust code. The -rest of this subsection outlines some cross-cutting principles around -guarantees. diff --git a/src/doc/style/safety/lib-guarantees.md b/src/doc/style/safety/lib-guarantees.md deleted file mode 100644 index 8ee64f1806..0000000000 --- a/src/doc/style/safety/lib-guarantees.md +++ /dev/null @@ -1,81 +0,0 @@ -% Library-level guarantees - -Most libraries rely on internal invariants, e.g. about their data, resource -ownership, or protocol states. In Rust, broken invariants cannot produce -segfaults, but they can still lead to wrong answers. - -### Provide library-level guarantees whenever practical. **[FIXME: needs RFC]** - -Library-level invariants should be turned into guarantees whenever -practical. They should hold no matter what the client does, modulo -explicit opt-outs. Depending on the kind of invariant, this can be -achieved through a combination of static and dynamic enforcement, as -described below. 
- -#### Static enforcement: - -Guaranteeing invariants almost always requires _hiding_, -i.e. preventing the client from directly accessing or modifying -internal data. - -For example, the representation of the `str` type is hidden, -which means that any value of type `str` must have been produced -through an API under the control of the `str` module, and these -APIs in turn ensure valid utf-8 encoding. - -Rust's type system makes it possible to provide guarantees even while -revealing more of the representation than usual. For example, the -`as_bytes()` method on `&str` gives a _read-only_ view into the -underlying buffer, which cannot be used to violate the utf-8 property. - -#### Dynamic enforcement: - -Malformed inputs from the client are hazards to library-level -guarantees, so library APIs should validate their input. - -For example, `std::str::from_utf8_owned` attempts to convert a `u8` -slice into an owned string, but dynamically checks that the slice is -valid utf-8 and returns `Err` if not. - -See -[the discussion on input validation](../features/functions-and-methods/input.md) -for more detail. - - -### Prefer static enforcement of guarantees. **[FIXME: needs RFC]** - -Static enforcement provides two strong benefits over dynamic enforcement: - -* Bugs are caught at compile time. -* There is no runtime cost. - -Sometimes purely static enforcement is impossible or impractical. In these -cases, a library should check as much as possible statically, but defer to -dynamic checks where needed. - -For example, the `std::string` module exports a `String` type with the guarantee -that all instances are valid utf-8: - -* Any _consumer_ of a `String` is statically guaranteed utf-8 contents. For example, - the `append` method can push a `&str` onto the end of a `String` without - checking anything dynamically, since the existing `String` and `&str` are - statically guaranteed to be in utf-8. - -* Some _producers_ of a `String` must perform dynamic checks. For example, the - `from_utf8` function attempts to convert a `Vec` into a `String`, but - dynamically checks that the contents are utf-8. - -### Provide opt-outs with caution; make them explicit. **[FIXME: needs RFC]** - -Providing library-level guarantees sometimes entails inconvenience (for static -checks) or overhead (for dynamic checks). So it is sometimes desirable to allow -clients to sidestep this checking, while promising to use the API in a way that -still provides the guarantee. Such escape hatches should only be introduced when -there is a demonstrated need for them. - -It should be trivial for clients to audit their use of the library for -escape hatches. - -See -[the discussion on input validation](../features/functions-and-methods/input.md) -for conventions on marking opt-out functions. diff --git a/src/doc/style/safety/unsafe.md b/src/doc/style/safety/unsafe.md deleted file mode 100644 index a8a50af044..0000000000 --- a/src/doc/style/safety/unsafe.md +++ /dev/null @@ -1,22 +0,0 @@ -% Using `unsafe` - -### Unconditionally guarantee safety, or mark API as `unsafe`. **[FIXME: needs RFC]** - -Memory safety, type safety, and data race freedom are basic assumptions for all -Rust code. - -APIs that use `unsafe` blocks internally thus have two choices: - -* They can guarantee safety _unconditionally_ (i.e., regardless of client - behavior or inputs) and be exported as safe code. Any safety violation is then - the library's fault, not the client's fault. - -* They can export potentially unsafe functions with the `unsafe` qualifier. 
In - this case, the documentation should make very clear the conditions under which - safety is guaranteed. - -The result is that a client program can never violate safety merely by having a -bug; it must have explicitly opted out by using an `unsafe` block. - -Of the two options for using `unsafe`, creating such safe abstractions (the -first option above) is strongly preferred. diff --git a/src/doc/style/style/README.md b/src/doc/style/style/README.md deleted file mode 100644 index 8744971054..0000000000 --- a/src/doc/style/style/README.md +++ /dev/null @@ -1,5 +0,0 @@ -% Style - -This section gives a set of strict rules for styling Rust code. - -> **[FIXME]** General remarks about the style guidelines diff --git a/src/doc/style/style/braces.md b/src/doc/style/style/braces.md deleted file mode 100644 index 80323dba1d..0000000000 --- a/src/doc/style/style/braces.md +++ /dev/null @@ -1,77 +0,0 @@ -% Braces, semicolons, and commas [FIXME: needs RFC] - -### Opening braces always go on the same line. - -```rust,ignore -fn foo() { - ... -} - -fn frobnicate(a: Bar, b: Bar, - c: Bar, d: Bar) - -> Bar { - ... -} - -trait Bar { - fn baz(&self); -} - -impl Bar for Baz { - fn baz(&self) { - ... - } -} - -frob(|x| { - x.transpose() -}) -``` - -### `match` arms get braces, except for single-line expressions. - -```rust,ignore -match foo { - bar => baz, - quux => { - do_something(); - do_something_else() - } -} -``` - -### `return` statements get semicolons. - -```rust,ignore -fn foo() { - do_something(); - - if condition() { - return; - } - - do_something_else(); -} -``` - -### Trailing commas - -> **[FIXME]** We should have a guideline for when to include trailing -> commas in `struct`s, `match`es, function calls, etc. -> -> One possible rule: a trailing comma should be included whenever the -> closing delimiter appears on a separate line: - -```rust,ignore -Foo { bar: 0, baz: 1 } - -Foo { - bar: 0, - baz: 1, -} - -match a_thing { - None => 0, - Some(x) => 1, -} -``` diff --git a/src/doc/style/style/comments.md b/src/doc/style/style/comments.md deleted file mode 100644 index af02d87cc8..0000000000 --- a/src/doc/style/style/comments.md +++ /dev/null @@ -1,122 +0,0 @@ -% Comments [RFC #505] - -### Avoid block comments. - -Use line comments: - -```rust -// Wait for the main thread to return, and set the process error code -// appropriately. -``` - -Instead of: - -``` rust -/* - * Wait for the main thread to return, and set the process error code - * appropriately. - */ -``` - -## Doc comments - -Doc comments are prefixed by three slashes (`///`) and indicate -documentation that you would like to be included in Rustdoc's output. -They support -[Markdown syntax](https://en.wikipedia.org/wiki/Markdown) -and are the main way of documenting your public APIs. - -The supported markdown syntax includes all of the extensions listed in the -[GitHub Flavored Markdown] -(https://help.github.com/articles/github-flavored-markdown) documentation, -plus superscripts. - -### Summary line - -The first line in any doc comment should be a single-line short sentence -providing a summary of the code. This line is used as a short summary -description throughout Rustdoc's output, so it's a good idea to keep it -short. - -### Sentence structure - -All doc comments, including the summary line, should begin with a -capital letter and end with a period, question mark, or exclamation -point. Prefer full sentences to fragments. 
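For instance, a summary line in that style might look like this (the `Buffer` type exists only for illustration):

```rust
pub struct Buffer {
    bytes: Vec<u8>,
}

impl Buffer {
    /// Returns the number of bytes currently stored in the buffer.
    pub fn len(&self) -> usize {
        self.bytes.len()
    }
}
```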
- -The summary line should be written in -[third person singular present indicative form] -(http://en.wikipedia.org/wiki/English_verbs#Third_person_singular_present). -Basically, this means write "Returns" instead of "Return". - -For example: - -```rust,ignore -/// Sets up a default runtime configuration, given compiler-supplied arguments. -/// -/// This function will block until the entire pool of M:N schedulers has -/// exited. This function also requires a local thread to be available. -/// -/// # Arguments -/// -/// * `argc` & `argv` - The argument vector. On Unix this information is used -/// by `os::args`. -/// * `main` - The initial procedure to run inside of the M:N scheduling pool. -/// Once this procedure exits, the scheduling pool will begin to shut -/// down. The entire pool (and this function) will only return once -/// all child threads have finished executing. -/// -/// # Return value -/// -/// The return value is used as the process return code. 0 on success, 101 on -/// error. -``` - -### Code snippets - -Only use inner doc comments `//!` to write crate and module-level documentation, -nothing else. When using `mod` blocks, prefer `///` outside of the block: - -```rust -/// This module contains tests -mod test { - // ... -} -``` - -over - -```rust -mod test { - //! This module contains tests - - // ... -} -``` - -### Avoid inner doc comments. - -Use inner doc comments _only_ to document crates and file-level modules: - -```rust,ignore -//! The core library. -//! -//! The core library is a something something... -``` - -### Explain context. - -Rust doesn't have special constructors, only functions that return new -instances. These aren't visible in the automatically generated documentation -for a type, so you should specifically link to them: - -```rust,ignore -/// An iterator that yields `None` forever after the underlying iterator -/// yields `None` once. -/// -/// These can be created through -/// [`iter.fuse()`](trait.Iterator.html#method.fuse). -pub struct Fuse { - // ... -} -``` diff --git a/src/doc/style/style/features.md b/src/doc/style/style/features.md deleted file mode 100644 index 13cc37fc23..0000000000 --- a/src/doc/style/style/features.md +++ /dev/null @@ -1,13 +0,0 @@ -## `return` [RFC #968] - -Terminate `return` statements with semicolons: - -``` rust,ignore -fn foo(bar: i32) -> Option { - if some_condition() { - return None; - } - - ... -} -``` diff --git a/src/doc/style/style/imports.md b/src/doc/style/style/imports.md deleted file mode 100644 index c958875ddb..0000000000 --- a/src/doc/style/style/imports.md +++ /dev/null @@ -1,50 +0,0 @@ -% Imports [FIXME: needs RFC] - -The imports of a crate/module should consist of the following -sections, in order, with a blank space between each: - -* `extern crate` directives -* external `use` imports -* local `use` imports -* `pub use` imports - -For example: - -```rust,ignore -// Crates. -extern crate getopts; -extern crate mylib; - -// Standard library imports. -use getopts::{optopt, getopts}; -use std::os; - -// Import from a library that we wrote. -use mylib::webserver; - -// Will be reexported when we import this module. -pub use self::types::Webdata; -``` - -### Avoid `use *`, except in tests. - -Glob imports have several downsides: -* They make it harder to tell where names are bound. -* They are forwards-incompatible, since new upstream exports can clash - with existing names. - -When writing a [`test` submodule](../testing/README.md), importing `super::*` is appropriate -as a convenience. 
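As a minimal sketch of this exception (the `parse_header` function and its tests are invented for illustration, not taken from any real library):

```rust
/// Parses a `name: value` header line, returning `None` if no colon is present.
pub fn parse_header(line: &str) -> Option<(&str, &str)> {
    let mut parts = line.splitn(2, ':');
    match (parts.next(), parts.next()) {
        (Some(name), Some(value)) => Some((name.trim(), value.trim())),
        _ => None,
    }
}

#[cfg(test)]
mod tests {
    // Glob-importing the parent module is fine here, purely as a convenience.
    use super::*;

    #[test]
    fn parses_a_simple_header() {
        assert_eq!(parse_header("Host: example.com"),
                   Some(("Host", "example.com")));
    }
}
```

Outside of such `test` submodules, the explicit import style shown earlier keeps it obvious where every name comes from.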
- -### Prefer fully importing types/traits while module-qualifying functions. - -For example: - -```rust,ignore -use option::Option; -use mem; - -let i: isize = mem::transmute(Option(0)); -``` - -> **[FIXME]** Add rationale. diff --git a/src/doc/style/style/naming/README.md b/src/doc/style/style/naming/README.md deleted file mode 100644 index 6d88a838f5..0000000000 --- a/src/doc/style/style/naming/README.md +++ /dev/null @@ -1,115 +0,0 @@ -% Naming conventions - -### General conventions [RFC #430] - -> The guidelines below were approved by [RFC #430](https://github.com/rust-lang/rfcs/pull/430). - -In general, Rust tends to use `CamelCase` for "type-level" constructs -(types and traits) and `snake_case` for "value-level" constructs. More -precisely: - -| Item | Convention | -| ---- | ---------- | -| Crates | `snake_case` (but prefer single word) | -| Modules | `snake_case` | -| Types | `CamelCase` | -| Traits | `CamelCase` | -| Enum variants | `CamelCase` | -| Functions | `snake_case` | -| Methods | `snake_case` | -| General constructors | `new` or `with_more_details` | -| Conversion constructors | `from_some_other_type` | -| Local variables | `snake_case` | -| Static variables | `SCREAMING_SNAKE_CASE` | -| Constant variables | `SCREAMING_SNAKE_CASE` | -| Type parameters | concise `CamelCase`, usually single uppercase letter: `T` | -| Lifetimes | short, lowercase: `'a` | - -
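A rough sketch of several of these conventions used together (all of the item names below are hypothetical):

```rust
pub const MAX_RETRIES: u32 = 3;            // constant: SCREAMING_SNAKE_CASE

pub trait EventSink {                      // trait: CamelCase
    fn emit(&self, event: &str);           // method: snake_case
}

pub struct RetryPolicy<T> {                // type: CamelCase, type parameter: T
    max_retries: u32,
    payload: T,
}

impl<T> RetryPolicy<T> {
    // General constructor: `new`.
    pub fn new(payload: T) -> RetryPolicy<T> {
        RetryPolicy { max_retries: MAX_RETRIES, payload: payload }
    }

    // Constructor with more detail: `with_...`.
    pub fn with_max_retries(payload: T, max_retries: u32) -> RetryPolicy<T> {
        RetryPolicy { max_retries: max_retries, payload: payload }
    }
}
```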

-In `CamelCase`, acronyms count as one word: use `Uuid` rather than -`UUID`. In `snake_case`, acronyms are lower-cased: `is_xid_start`. - -In `snake_case` or `SCREAMING_SNAKE_CASE`, a "word" should never -consist of a single letter unless it is the last "word". So, we have -`btree_map` rather than `b_tree_map`, but `PI_2` rather than `PI2`. - -### Referring to types in function/method names [RFC 344] - -> The guidelines below were approved by [RFC #344](https://github.com/rust-lang/rfcs/pull/344). - -Function names often involve type names, the most common example being conversions -like `as_slice`. If the type has a purely textual name (ignoring parameters), it -is straightforward to convert between type conventions and function conventions: - -Type name | Text in methods ---------- | --------------- -`String` | `string` -`Vec` | `vec` -`YourType`| `your_type` - -Types that involve notation follow the convention below. There is some -overlap on these rules; apply the most specific applicable rule: - -Type name | Text in methods ---------- | --------------- -`&str` | `str` -`&[T]` | `slice` -`&mut [T]`| `mut_slice` -`&[u8]` | `bytes` -`&T` | `ref` -`&mut T` | `mut` -`*const T`| `ptr` -`*mut T` | `mut_ptr` - -### Avoid redundant prefixes [RFC 356] - -> The guidelines below were approved by [RFC #356](https://github.com/rust-lang/rfcs/pull/356). - -Names of items within a module should not be prefixed with that module's name: - -Prefer - -```rust,ignore -mod foo { - pub struct Error { ... } -} -``` - -over - -```rust,ignore -mod foo { - pub struct FooError { ... } -} -``` - -This convention avoids stuttering (like `io::IoError`). Library clients can -rename on import to avoid clashes. - -### Getter/setter methods [RFC 344] - -> The guidelines below were approved by [RFC #344](https://github.com/rust-lang/rfcs/pull/344). - -Some data structures do not wish to provide direct access to their fields, but -instead offer "getter" and "setter" methods for manipulating the field state -(often providing checking or other functionality). - -The convention for a field `foo: T` is: - -* A method `foo(&self) -> &T` for getting the current value of the field. -* A method `set_foo(&self, val: T)` for setting the field. (The `val` argument - here may take `&T` or some other type, depending on the context.) - -Note that this convention is about getters/setters on ordinary data types, *not* -on [builder objects](../../ownership/builders.html). - -### Escape hatches [FIXME] - -> **[FIXME]** Should we standardize a convention for functions that may break API -> guarantees? e.g. `ToCStr::to_c_str_unchecked` - -### Predicates - -* Simple boolean predicates should be prefixed with `is_` or another - short question word, e.g., `is_empty`. -* Common exceptions: `lt`, `gt`, and other established predicate names. diff --git a/src/doc/style/style/naming/containers.md b/src/doc/style/style/naming/containers.md deleted file mode 100644 index c352a5b1bf..0000000000 --- a/src/doc/style/style/naming/containers.md +++ /dev/null @@ -1,69 +0,0 @@ -% Common container/wrapper methods [FIXME: needs RFC] - -Containers, wrappers, and cells all provide ways to access the data -they enclose. Accessor methods often have variants to access the data -by value, by reference, and by mutable reference. - -In general, the `get` family of methods is used to access contained -data without any risk of thread failure; they return `Option` as -appropriate. 
This name is chosen rather than names like `find` or -`lookup` because it is appropriate for a wider range of container types. - -#### Containers - -For a container with keys/indexes of type `K` and elements of type `V`: - -```rust,ignore -// Look up element without failing -fn get(&self, key: K) -> Option<&V> -fn get_mut(&mut self, key: K) -> Option<&mut V> - -// Convenience for .get(key).map(|elt| elt.clone()) -fn get_clone(&self, key: K) -> Option - -// Lookup element, failing if it is not found: -impl Index for Container { ... } -impl IndexMut for Container { ... } -``` - -#### Wrappers/Cells - -Prefer specific conversion functions like `as_bytes` or `into_vec` whenever -possible. Otherwise, use: - -```rust,ignore -// Extract contents without failing -fn get(&self) -> &V -fn get_mut(&mut self) -> &mut V -fn unwrap(self) -> V -``` - -#### Wrappers/Cells around `Copy` data - -```rust,ignore -// Extract contents without failing -fn get(&self) -> V -``` - -#### `Option`-like types - -Finally, we have the cases of types like `Option` and `Result`, which -play a special role for failure. - -For `Option`: - -```rust,ignore -// Extract contents or fail if not available -fn assert(self) -> V -fn expect(self, &str) -> V -``` - -For `Result`: - -```rust,ignore -// Extract the contents of Ok variant; fail if Err -fn assert(self) -> V - -// Extract the contents of Err variant; fail if Ok -fn assert_err(self) -> E -``` diff --git a/src/doc/style/style/naming/conversions.md b/src/doc/style/style/naming/conversions.md deleted file mode 100644 index 0287919c78..0000000000 --- a/src/doc/style/style/naming/conversions.md +++ /dev/null @@ -1,32 +0,0 @@ -% Conversions [Rust issue #7087] - -> The guidelines below were approved by [rust issue #7087](https://github.com/rust-lang/rust/issues/7087). - -> **[FIXME]** Should we provide standard traits for conversions? Doing -> so nicely will require -> [trait reform](https://github.com/rust-lang/rfcs/pull/48) to land. - -Conversions should be provided as methods, with names prefixed as follows: - -| Prefix | Cost | Consumes convertee | -| ------ | ---- | ------------------ | -| `as_` | Free | No | -| `to_` | Expensive | No | -| `into_` | Variable | Yes | - -

-For example: - -* `as_bytes()` gives a `&[u8]` view into a `&str`, which is a no-op. -* `to_owned()` copies a `&str` to a new `String`. -* `into_bytes()` consumes a `String` and yields the underlying - `Vec`, which is a no-op. - -Conversions prefixed `as_` and `into_` typically _decrease abstraction_, either -exposing a view into the underlying representation (`as`) or deconstructing data -into its underlying representation (`into`). Conversions prefixed `to_`, on the -other hand, typically stay at the same level of abstraction but do some work to -change one representation into another. - -> **[FIXME]** The distinctions between conversion methods does not work -> so well for `from_` conversion constructors. Is that a problem? diff --git a/src/doc/style/style/naming/iterators.md b/src/doc/style/style/naming/iterators.md deleted file mode 100644 index 945cbe4800..0000000000 --- a/src/doc/style/style/naming/iterators.md +++ /dev/null @@ -1,32 +0,0 @@ -% Iterators - -#### Method names [RFC #199] - -> The guidelines below were approved by [RFC #199](https://github.com/rust-lang/rfcs/pull/199). - -For a container with elements of type `U`, iterator methods should be named: - -```rust,ignore -fn iter(&self) -> T // where T implements Iterator<&U> -fn iter_mut(&mut self) -> T // where T implements Iterator<&mut U> -fn into_iter(self) -> T // where T implements Iterator -``` - -The default iterator variant yields shared references `&U`. - -#### Type names [RFC #344] - -> The guidelines below were approved by [RFC #344](https://github.com/rust-lang/rfcs/pull/344). - -The name of an iterator type should be the same as the method that -produces the iterator. - -For example: - -* `iter` should yield an `Iter` -* `iter_mut` should yield an `IterMut` -* `into_iter` should yield an `IntoIter` -* `keys` should yield `Keys` - -These type names make the most sense when prefixed with their owning module, -e.g. `vec::IntoIter`. diff --git a/src/doc/style/style/naming/ownership.md b/src/doc/style/style/naming/ownership.md deleted file mode 100644 index 32cd8a1595..0000000000 --- a/src/doc/style/style/naming/ownership.md +++ /dev/null @@ -1,34 +0,0 @@ -% Ownership variants [RFC #199] - -> The guidelines below were approved by [RFC #199](https://github.com/rust-lang/rfcs/pull/199). - -Functions often come in multiple variants: immutably borrowed, mutably -borrowed, and owned. - -The right default depends on the function in question. Variants should -be marked through suffixes. - -#### Immutably borrowed by default - -If `foo` uses/produces an immutable borrow by default, use: - -* The `_mut` suffix (e.g. `foo_mut`) for the mutably borrowed variant. -* The `_move` suffix (e.g. `foo_move`) for the owned variant. - -#### Owned by default - -If `foo` uses/produces owned data by default, use: - -* The `_ref` suffix (e.g. `foo_ref`) for the immutably borrowed variant. -* The `_mut` suffix (e.g. `foo_mut`) for the mutably borrowed variant. - -#### Exceptions - -In the case of iterators, the moving variant can also be understood as -an `into` conversion, `into_iter`, and `for x in v.into_iter()` reads -arguably better than `for x in v.iter_move()`, so the convention is -`into_iter`. - -For mutably borrowed variants, if the `mut` qualifier is part of a -type name (e.g. `as_mut_slice`), it should appear as it would appear -in the type. 
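As a rough sketch of how the iterator-naming and ownership-variant conventions fit together, here is a hypothetical `stack` module (the `Stack` type is invented for illustration; the `Iterator` implementations simply delegate to the standard slice and `Vec` iterators and are elided):

```rust,ignore
use std::{slice, vec};

pub struct Stack<U> {
    items: Vec<U>,
}

impl<U> Stack<U> {
    /// Immutably borrowed iteration is the default, so it gets no suffix.
    pub fn iter(&self) -> Iter<U> { Iter(self.items.iter()) }

    /// The mutably borrowed variant takes the `_mut` suffix.
    pub fn iter_mut(&mut self) -> IterMut<U> { IterMut(self.items.iter_mut()) }

    /// The owned variant reads as an `into` conversion, so it is named
    /// `into_iter` rather than `iter_move`.
    pub fn into_iter(self) -> IntoIter<U> { IntoIter(self.items.into_iter()) }
}

/// Each iterator type is named after the method that produces it, and reads
/// best prefixed with its owning module: `stack::Iter`, `stack::IterMut`,
/// `stack::IntoIter`.
pub struct Iter<'a, U: 'a>(slice::Iter<'a, U>);
pub struct IterMut<'a, U: 'a>(slice::IterMut<'a, U>);
pub struct IntoIter<U>(vec::IntoIter<U>);
```

In real code these wrapper types would also implement `Iterator`, and `Stack` would implement `IntoIterator` so that `for x in &stack` and `for x in stack` behave as expected.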
diff --git a/src/doc/style/style/optional.md b/src/doc/style/style/optional.md deleted file mode 100644 index d3c2178cc9..0000000000 --- a/src/doc/style/style/optional.md +++ /dev/null @@ -1,3 +0,0 @@ -* - -* diff --git a/src/doc/style/style/organization.md b/src/doc/style/style/organization.md deleted file mode 100644 index 85065406d7..0000000000 --- a/src/doc/style/style/organization.md +++ /dev/null @@ -1,14 +0,0 @@ -% Organization [FIXME: needs RFC] - -> **[FIXME]** What else? - -### Reexport the most important types at the crate level. - -Crates `pub use` the most common types for convenience, so that clients do not -have to remember or write the crate's module hierarchy to use these types. - -### Define types and operations together. - -Type definitions and the functions/methods that operate on them should be -defined together in a single module, with the type appearing above the -functions/methods. diff --git a/src/doc/style/style/whitespace.md b/src/doc/style/style/whitespace.md deleted file mode 100644 index c33c17c8e4..0000000000 --- a/src/doc/style/style/whitespace.md +++ /dev/null @@ -1,133 +0,0 @@ -% Whitespace [FIXME: needs RFC] - -* Lines must not exceed 99 characters. -* Use 4 spaces for indentation, _not_ tabs. -* No trailing whitespace at the end of lines or files. - -### Spaces - -* Use spaces around binary operators, including the equals sign in attributes: - -```rust,ignore -#[deprecated = "Use `bar` instead."] -fn foo(a: usize, b: usize) -> usize { - a + b -} -``` - -* Use a space after colons and commas: - -```rust,ignore -fn foo(a: Bar); - -MyStruct { foo: 3, bar: 4 } - -foo(bar, baz); -``` - -* Use a space after the opening and before the closing brace for - single line blocks or `struct` expressions: - -```rust,ignore -spawn(proc() { do_something(); }) - -Point { x: 0.1, y: 0.3 } -``` - -### Line wrapping - -* For multiline function signatures, each new line should align with the - first parameter. Multiple parameters per line are permitted: - -```rust,ignore -fn frobnicate(a: Bar, b: Bar, - c: Bar, d: Bar) - -> Bar { - ... -} - -fn foo( - a: Bar, - b: Bar) - -> Baz { - ... -} -``` - -* Multiline function invocations generally follow the same rule as for - signatures. However, if the final argument begins a new block, the - contents of the block may begin on a new line, indented one level: - -```rust,ignore -fn foo_bar(a: Bar, b: Bar, - c: |Bar|) -> Bar { - ... -} - -// Same line is fine: -foo_bar(x, y, |z| { z.transpose(y) }); - -// Indented body on new line is also fine: -foo_bar(x, y, |z| { - z.quux(); - z.rotate(x) -}) -``` - -> **[FIXME]** Do we also want to allow the following? -> -> ```rust,ignore -> frobnicate( -> arg1, -> arg2, -> arg3) -> ``` -> -> This style could ease the conflict between line length and functions -> with many parameters (or long method chains). - -### Matches - -> * **[Deprecated]** If you have multiple patterns in a single `match` -> arm, write each pattern on a separate line: -> -> ```rust,ignore -> match foo { -> bar(_) -> | baz => quux, -> x -> | y -> | z => { -> quuux -> } -> } -> ``` - -### Alignment - -Idiomatic code should not use extra whitespace in the middle of a line -to provide alignment. 
- - -```rust,ignore -// Good -struct Foo { - short: f64, - really_long: f64, -} - -// Bad -struct Bar { - short: f64, - really_long: f64, -} - -// Good -let a = 0; -let radius = 7; - -// Bad -let b = 0; -let diameter = 7; -``` diff --git a/src/doc/style/testing/README.md b/src/doc/style/testing/README.md deleted file mode 100644 index a21f69414d..0000000000 --- a/src/doc/style/testing/README.md +++ /dev/null @@ -1,5 +0,0 @@ -% Testing - -> **[FIXME]** Add some general remarks about when and how to unit -> test, versus other kinds of testing. What are our expectations for -> Rust's core libraries? diff --git a/src/doc/style/testing/unit.md b/src/doc/style/testing/unit.md deleted file mode 100644 index dbbe9fc3ac..0000000000 --- a/src/doc/style/testing/unit.md +++ /dev/null @@ -1,30 +0,0 @@ -% Unit testing - -Unit tests should live in a `tests` submodule at the bottom of the module they -test. Mark the `tests` submodule with `#[cfg(test)]` so it is only compiled when -testing. - -The `tests` module should contain: - -* Imports needed only for testing. -* Functions marked with `#[test]` striving for full coverage of the parent module's - definitions. -* Auxiliary functions needed for writing the tests. - -For example: - -``` rust -// Excerpt from std::str - -#[cfg(test)] -mod tests { - #[test] - fn test_eq() { - assert!((eq(&"".to_owned(), &"".to_owned()))); - assert!((eq(&"foo".to_owned(), &"foo".to_owned()))); - assert!((!eq(&"foo".to_owned(), &"bar".to_owned()))); - } -} -``` - -> **[FIXME]** add details about useful macros for testing, e.g. `assert!` diff --git a/src/doc/style/todo.md b/src/doc/style/todo.md deleted file mode 100644 index 28ef2a1832..0000000000 --- a/src/doc/style/todo.md +++ /dev/null @@ -1,5 +0,0 @@ -* [Containers and iteration]() -* [The visitor pattern]() -* [Concurrency]() -* [Documentation]() -* [Macros]() diff --git a/src/etc/CONFIGS.md b/src/etc/CONFIGS.md index cde7094cec..542b7bf797 100644 --- a/src/etc/CONFIGS.md +++ b/src/etc/CONFIGS.md @@ -6,6 +6,7 @@ These are some links to repos with configs which ease the use of rust. 
* [rust.vim](https://github.com/rust-lang/rust.vim) * [emacs rust-mode](https://github.com/rust-lang/rust-mode) +* [sublime-rust](https://github.com/rust-lang/sublime-rust) * [gedit-config](https://github.com/rust-lang/gedit-config) * [kate-config](https://github.com/rust-lang/kate-config) * [nano-config](https://github.com/rust-lang/nano-config) diff --git a/src/etc/debugger_pretty_printers_common.py b/src/etc/debugger_pretty_printers_common.py index b2bb785966..eb562877c8 100644 --- a/src/etc/debugger_pretty_printers_common.py +++ b/src/etc/debugger_pretty_printers_common.py @@ -324,3 +324,20 @@ def extract_length_and_ptr_from_slice(slice_val): assert data_ptr.type.get_dwarf_type_kind() == DWARF_TYPE_CODE_PTR return (length, data_ptr) + +UNQUALIFIED_TYPE_MARKERS = frozenset(["(", "[", "&", "*"]) + +def extract_type_name(qualified_type_name): + """Extracts the type name from a fully qualified path""" + if qualified_type_name[0] in UNQUALIFIED_TYPE_MARKERS: + return qualified_type_name + + end_of_search = qualified_type_name.find("<") + if end_of_search < 0: + end_of_search = len(qualified_type_name) + + index = qualified_type_name.rfind("::", 0, end_of_search) + if index < 0: + return qualified_type_name + else: + return qualified_type_name[index + 2:] diff --git a/src/etc/gdb_rust_pretty_printing.py b/src/etc/gdb_rust_pretty_printing.py index 554ab66bc5..afac8d6bba 100755 --- a/src/etc/gdb_rust_pretty_printing.py +++ b/src/etc/gdb_rust_pretty_printing.py @@ -16,7 +16,7 @@ import debugger_pretty_printers_common as rustpp # We want a version of `range` which doesn't allocate an intermediate list, # specifically it should use a lazy iterator. In Python 2 this was `xrange`, but # if we're running with Python 3 then we need to use `range` instead. -if sys.version_info.major >= 3: +if sys.version_info[0] >= 3: xrange = range #=============================================================================== @@ -36,7 +36,7 @@ class GdbType(rustpp.Type): if tag is None: return tag - return tag.replace("&'static ", "&") + return rustpp.extract_type_name(tag).replace("&'static ", "&") def get_dwarf_type_kind(self): if self.ty.code == gdb.TYPE_CODE_STRUCT: @@ -170,7 +170,7 @@ def rust_pretty_printer_lookup_function(gdb_val): #=------------------------------------------------------------------------------ # Pretty Printer Classes #=------------------------------------------------------------------------------ -class RustStructPrinter: +class RustStructPrinter(object): def __init__(self, val, omit_first_field, omit_type_name, is_tuple_like): self.__val = val self.__omit_first_field = omit_first_field @@ -205,11 +205,12 @@ class RustStructPrinter: return "" -class RustSlicePrinter: +class RustSlicePrinter(object): def __init__(self, val): self.__val = val - def display_hint(self): + @staticmethod + def display_hint(): return "array" def to_string(self): @@ -226,7 +227,7 @@ class RustSlicePrinter: yield (str(index), (raw_ptr + index).dereference()) -class RustStringSlicePrinter: +class RustStringSlicePrinter(object): def __init__(self, val): self.__val = val @@ -236,11 +237,12 @@ class RustStringSlicePrinter: return '"%s"' % raw_ptr.string(encoding="utf-8", length=length) -class RustStdVecPrinter: +class RustStdVecPrinter(object): def __init__(self, val): self.__val = val - def display_hint(self): + @staticmethod + def display_hint(): return "array" def to_string(self): @@ -255,7 +257,7 @@ class RustStdVecPrinter: yield (str(index), (gdb_ptr + index).dereference()) -class RustStdStringPrinter: +class 
RustStdStringPrinter(object): def __init__(self, val): self.__val = val @@ -266,7 +268,7 @@ class RustStdStringPrinter: length=length) -class RustCStyleVariantPrinter: +class RustCStyleVariantPrinter(object): def __init__(self, val): assert val.type.get_dwarf_type_kind() == rustpp.DWARF_TYPE_CODE_ENUM self.__val = val @@ -275,7 +277,7 @@ class RustCStyleVariantPrinter: return str(self.__val.get_wrapped_value()) -class IdentityPrinter: +class IdentityPrinter(object): def __init__(self, string): self.string = string diff --git a/src/etc/lldb_batchmode.py b/src/etc/lldb_batchmode.py index 7bbb3577f8..4952cf4f82 100644 --- a/src/etc/lldb_batchmode.py +++ b/src/etc/lldb_batchmode.py @@ -37,14 +37,14 @@ DEBUG_OUTPUT = False def print_debug(s): - "Print something if DEBUG_OUTPUT is True" + """Print something if DEBUG_OUTPUT is True""" global DEBUG_OUTPUT if DEBUG_OUTPUT: print("DEBUG: " + str(s)) def normalize_whitespace(s): - "Replace newlines, tabs, multiple spaces, etc with exactly one space" + """Replace newlines, tabs, multiple spaces, etc with exactly one space""" return re.sub("\s+", " ", s) @@ -71,7 +71,7 @@ registered_breakpoints = set() def execute_command(command_interpreter, command): - "Executes a single CLI command" + """Executes a single CLI command""" global new_breakpoints global registered_breakpoints diff --git a/src/etc/lldb_rust_formatters.py b/src/etc/lldb_rust_formatters.py index c22a60abf3..335acae5fb 100644 --- a/src/etc/lldb_rust_formatters.py +++ b/src/etc/lldb_rust_formatters.py @@ -29,7 +29,7 @@ class LldbType(rustpp.Type): if qualified_name is None: return qualified_name - return extract_type_name(qualified_name).replace("&'static ", "&") + return rustpp.extract_type_name(qualified_name).replace("&'static ", "&") def get_dwarf_type_kind(self): type_class = self.ty.GetTypeClass() @@ -171,10 +171,10 @@ def print_val(lldb_val, internal_dict): #=-------------------------------------------------------------------------------------------------- def print_struct_val(val, internal_dict, omit_first_field, omit_type_name, is_tuple_like): - ''' + """ Prints a struct, tuple, or tuple struct value with Rust syntax. Ignores any fields before field_start_index. 
- ''' + """ assert val.type.get_dwarf_type_kind() == rustpp.DWARF_TYPE_CODE_STRUCT if omit_type_name: @@ -204,7 +204,7 @@ def print_struct_val(val, internal_dict, omit_first_field, omit_type_name, is_tu # LLDB is not good at handling zero-sized values, so we have to help # it a little if field.GetType().GetByteSize() == 0: - return this + extract_type_name(field.GetType().GetName()) + return this + rustpp.extract_type_name(field.GetType().GetName()) else: return this + "" @@ -221,7 +221,7 @@ def print_struct_val(val, internal_dict, omit_first_field, omit_type_name, is_tu "body": body} def print_pointer_val(val, internal_dict): - '''Prints a pointer value with Rust syntax''' + """Prints a pointer value with Rust syntax""" assert val.type.get_dwarf_type_kind() == rustpp.DWARF_TYPE_CODE_PTR sigil = "&" type_name = val.type.get_unqualified_type_name() @@ -274,26 +274,9 @@ def print_std_string_val(val, internal_dict): # Helper Functions #=-------------------------------------------------------------------------------------------------- -UNQUALIFIED_TYPE_MARKERS = frozenset(["(", "[", "&", "*"]) - -def extract_type_name(qualified_type_name): - '''Extracts the type name from a fully qualified path''' - if qualified_type_name[0] in UNQUALIFIED_TYPE_MARKERS: - return qualified_type_name - - end_of_search = qualified_type_name.find("<") - if end_of_search < 0: - end_of_search = len(qualified_type_name) - - index = qualified_type_name.rfind("::", 0, end_of_search) - if index < 0: - return qualified_type_name - else: - return qualified_type_name[index + 2:] - def print_array_of_values(array_name, data_ptr_val, length, internal_dict): - '''Prints a contigous memory range, interpreting it as values of the - pointee-type of data_ptr_val.''' + """Prints a contigous memory range, interpreting it as values of the + pointee-type of data_ptr_val.""" data_ptr_type = data_ptr_val.type assert data_ptr_type.get_dwarf_type_kind() == rustpp.DWARF_TYPE_CODE_PTR diff --git a/src/etc/local_stage0.sh b/src/etc/local_stage0.sh index f5f39d264a..645a80ab8b 100755 --- a/src/etc/local_stage0.sh +++ b/src/etc/local_stage0.sh @@ -18,7 +18,7 @@ LIB_PREFIX=lib OS=`uname -s` case $OS in - ("Linux"|"FreeBSD"|"DragonFly"|"Bitrig"|"OpenBSD"|"SunOS") + ("Linux"|"FreeBSD"|"DragonFly"|"Bitrig"|"OpenBSD"|"SunOS"|"Haiku") BIN_SUF= LIB_SUF=.so ;; diff --git a/src/etc/platform-intrinsics/generator.py b/src/etc/platform-intrinsics/generator.py index a4a91170ef..e3c08bb35e 100644 --- a/src/etc/platform-intrinsics/generator.py +++ b/src/etc/platform-intrinsics/generator.py @@ -119,16 +119,19 @@ class Void(Type): def __init__(self): Type.__init__(self, 0) - def compiler_ctor(self): + @staticmethod + def compiler_ctor(): return '::VOID' def compiler_ctor_ref(self): return '&' + self.compiler_ctor() - def rust_name(self): + @staticmethod + def rust_name(): return '()' - def type_info(self, platform_info): + @staticmethod + def type_info(platform_info): return None def __eq__(self, other): @@ -282,7 +285,7 @@ class Vector(Type): class Pointer(Type): def __init__(self, elem, llvm_elem, const): - self._elem = elem; + self._elem = elem self._llvm_elem = llvm_elem self._const = const Type.__init__(self, BITWIDTH_POINTER) @@ -503,7 +506,7 @@ class GenericIntrinsic(object): # must be a power of two assert width & (width - 1) == 0 def recur(processed, untouched): - if untouched == []: + if not untouched: ret = processed[0] args = processed[1:] yield MonomorphicIntrinsic(self._platform, self.intrinsic, width, @@ -756,22 +759,26 @@ class 
ExternBlock(object): def __init__(self): pass - def open(self, platform): + @staticmethod + def open(platform): return 'extern "platform-intrinsic" {' - def render(self, mono): + @staticmethod + def render(mono): return ' fn {}{}{};'.format(mono.platform_prefix(), mono.intrinsic_name(), mono.intrinsic_signature()) - def close(self): + @staticmethod + def close(): return '}' class CompilerDefs(object): def __init__(self): pass - def open(self, platform): + @staticmethod + def open(platform): return '''\ // Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at @@ -798,7 +805,8 @@ pub fn find(name: &str) -> Option {{ if !name.starts_with("{0}") {{ return None }} Some(match &name["{0}".len()..] {{'''.format(platform.platform_prefix()) - def render(self, mono): + @staticmethod + def render(mono): return '''\ "{}" => Intrinsic {{ inputs: {{ static INPUTS: [&'static Type; {}] = [{}]; &INPUTS }}, @@ -810,7 +818,8 @@ pub fn find(name: &str) -> Option {{ mono.compiler_ret(), mono.llvm_name()) - def close(self): + @staticmethod + def close(): return '''\ _ => return None, }) diff --git a/src/etc/test-float-parse/runtests.py b/src/etc/test-float-parse/runtests.py index 896d63b9f0..bc141877b3 100644 --- a/src/etc/test-float-parse/runtests.py +++ b/src/etc/test-float-parse/runtests.py @@ -177,7 +177,6 @@ def run(test): def interact(proc, queue): - line = "" n = 0 while proc.poll() is None: line = proc.stdout.readline() @@ -185,7 +184,6 @@ def interact(proc, queue): continue assert line.endswith('\n'), "incomplete line: " + repr(line) queue.put(line) - line = "" n += 1 if n % UPDATE_EVERY_N == 0: msg("got", str(n // 1000) + "k", "records") diff --git a/src/etc/unicode.py b/src/etc/unicode.py index 822a3894fc..bddc83f63d 100755 --- a/src/etc/unicode.py +++ b/src/etc/unicode.py @@ -82,28 +82,28 @@ def load_unicode_data(f): canon_decomp = {} compat_decomp = {} - udict = {}; - range_start = -1; + udict = {} + range_start = -1 for line in fileinput.input(f): - data = line.split(';'); + data = line.split(';') if len(data) != 15: continue - cp = int(data[0], 16); + cp = int(data[0], 16) if is_surrogate(cp): continue if range_start >= 0: for i in xrange(range_start, cp): - udict[i] = data; - range_start = -1; + udict[i] = data + range_start = -1 if data[1].endswith(", First>"): - range_start = cp; - continue; - udict[cp] = data; + range_start = cp + continue + udict[cp] = data for code in udict: - [code_org, name, gencat, combine, bidi, + (code_org, name, gencat, combine, bidi, decomp, deci, digit, num, mirror, - old, iso, upcase, lowcase, titlecase ] = udict[code]; + old, iso, upcase, lowcase, titlecase) = udict[code] # generate char to char direct common and simple conversions # uppercase to lowercase @@ -382,7 +382,7 @@ def emit_bool_trie(f, name, t_data, is_pub=True): global bytes_old, bytes_new bytes_old += 8 * len(t_data) CHUNK = 64 - rawdata = [False] * 0x110000; + rawdata = [False] * 0x110000 for (lo, hi) in t_data: for cp in range(lo, hi + 1): rawdata[cp] = True diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs index 64b780413f..5f9ccd1820 100644 --- a/src/liballoc/arc.rs +++ b/src/liballoc/arc.rs @@ -71,6 +71,12 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize; /// does not use atomics, making it both thread-unsafe as well as significantly /// faster when updating the reference count. /// +/// Note: the inherent methods defined on `Arc` are all associated functions, +/// which means that you have to call them as e.g. 
`Arc::get_mut(&value)` +/// instead of `value.get_mut()`. This is so that there are no conflicts with +/// methods on the inner type `T`, which are what you want to call in the +/// majority of cases. +/// /// # Examples /// /// In this example, a large vector of data will be shared by several threads. First we @@ -121,7 +127,7 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize; /// } /// ``` -#[unsafe_no_drop_flag] +#[cfg_attr(stage0, unsafe_no_drop_flag)] #[stable(feature = "rust1", since = "1.0.0")] pub struct Arc { ptr: Shared>, @@ -147,7 +153,7 @@ impl, U: ?Sized> CoerceUnsized> for Arc {} /// nodes behind strong `Arc` pointers, and then storing the parent pointers /// as `Weak` pointers. -#[unsafe_no_drop_flag] +#[cfg_attr(stage0, unsafe_no_drop_flag)] #[stable(feature = "arc_weak", since = "1.4.0")] pub struct Weak { ptr: Shared>, @@ -325,6 +331,33 @@ impl Arc { deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) } } + + #[inline] + #[unstable(feature = "ptr_eq", + reason = "newly added", + issue = "36497")] + /// Return whether two `Arc` references point to the same value + /// (not just values that compare equal). + /// + /// # Examples + /// + /// ``` + /// #![feature(ptr_eq)] + /// + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// let same_five = five.clone(); + /// let other_five = Arc::new(5); + /// + /// assert!(Arc::ptr_eq(&five, &same_five)); + /// assert!(!Arc::ptr_eq(&five, &other_five)); + /// ``` + pub fn ptr_eq(this: &Self, other: &Self) -> bool { + let this_ptr: *const ArcInner = *this.ptr; + let other_ptr: *const ArcInner = *other.ptr; + this_ptr == other_ptr + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -559,15 +592,6 @@ impl Drop for Arc { #[unsafe_destructor_blind_to_params] #[inline] fn drop(&mut self) { - // This structure has #[unsafe_no_drop_flag], so this drop glue may run - // more than once (but it is guaranteed to be zeroed after the first if - // it's run more than once) - let thin = *self.ptr as *const (); - - if thin as usize == mem::POST_DROP_USIZE { - return; - } - // Because `fetch_sub` is already atomic, we do not need to synchronize // with other threads unless we are going to delete the object. This // same logic applies to the below `fetch_sub` to the `weak` count. @@ -721,6 +745,7 @@ impl Clone for Weak { #[stable(feature = "downgraded_weak", since = "1.10.0")] impl Default for Weak { + /// Constructs a new `Weak` without an accompanying instance of T. fn default() -> Weak { Weak::new() } @@ -755,12 +780,6 @@ impl Drop for Weak { /// ``` fn drop(&mut self) { let ptr = *self.ptr; - let thin = ptr as *const (); - - // see comments above for why this check is here - if thin as usize == mem::POST_DROP_USIZE { - return; - } // If we find out that we were the last weak pointer, then its time to // deallocate the data entirely. See the discussion in Arc::drop() about @@ -932,6 +951,7 @@ impl fmt::Pointer for Arc { #[stable(feature = "rust1", since = "1.0.0")] impl Default for Arc { + /// Creates a new `Arc`, with the `Default` value for T. 
fn default() -> Arc { Arc::new(Default::default()) } @@ -1207,6 +1227,16 @@ mod tests { let foo: Weak = Weak::new(); assert!(foo.upgrade().is_none()); } + + #[test] + fn test_ptr_eq() { + let five = Arc::new(5); + let same_five = five.clone(); + let other_five = Arc::new(5); + + assert!(Arc::ptr_eq(&five, &same_five)); + assert!(!Arc::ptr_eq(&five, &other_five)); + } } #[stable(feature = "rust1", since = "1.0.0")] diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs index 7ba5ca3094..bc9b6e805e 100644 --- a/src/liballoc/boxed.rs +++ b/src/liballoc/boxed.rs @@ -61,12 +61,12 @@ use core::borrow; use core::cmp::Ordering; use core::fmt; use core::hash::{self, Hash}; +use core::iter::FusedIterator; use core::marker::{self, Unsize}; use core::mem; use core::ops::{CoerceUnsized, Deref, DerefMut}; use core::ops::{BoxPlace, Boxed, InPlace, Place, Placer}; use core::ptr::{self, Unique}; -use core::raw::TraitObject; use core::convert::From; /// A value that represents the heap. This is the default place that the `box` @@ -271,6 +271,10 @@ impl Box { /// proper way to do so is to convert the raw pointer back into a /// `Box` with the `Box::from_raw` function. /// + /// Note: this is an associated function, which means that you have + /// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This + /// is so that there is no conflict with a method on the inner type. + /// /// # Examples /// /// ``` @@ -286,6 +290,7 @@ impl Box { #[stable(feature = "rust1", since = "1.0.0")] impl Default for Box { + /// Creates a `Box`, with the `Default` value for T. fn default() -> Box { box Default::default() } @@ -427,12 +432,8 @@ impl Box { pub fn downcast(self) -> Result, Box> { if self.is::() { unsafe { - // Get the raw representation of the trait object - let raw = Box::into_raw(self); - let to: TraitObject = mem::transmute::<*mut Any, TraitObject>(raw); - - // Extract the data pointer - Ok(Box::from_raw(to.data as *mut T)) + let raw: *mut Any = Box::into_raw(self); + Ok(Box::from_raw(raw as *mut T)) } } else { Err(self) @@ -529,6 +530,9 @@ impl DoubleEndedIterator for Box { #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for Box {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Box {} + /// `FnBox` is a version of the `FnOnce` intended for use with boxed /// closure objects. The idea is that where one would normally store a diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs index 0293d5402c..c6453da3f4 100644 --- a/src/liballoc/lib.rs +++ b/src/liballoc/lib.rs @@ -88,10 +88,10 @@ #![feature(staged_api)] #![feature(unboxed_closures)] #![feature(unique)] -#![feature(unsafe_no_drop_flag, filling_drop)] +#![cfg_attr(stage0, feature(unsafe_no_drop_flag))] #![feature(unsize)] -#![cfg_attr(not(test), feature(raw, fn_traits, placement_new_protocol))] +#![cfg_attr(not(test), feature(fused, fn_traits, placement_new_protocol))] #![cfg_attr(test, feature(test, box_heap))] // Allow testing this library diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs index cdb70ce577..23542215fa 100644 --- a/src/liballoc/raw_vec.rs +++ b/src/liballoc/raw_vec.rs @@ -44,7 +44,7 @@ use core::cmp; /// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity /// field. This allows zero-sized types to not be special-cased by consumers of /// this type. 
-#[unsafe_no_drop_flag] +#[cfg_attr(stage0, unsafe_no_drop_flag)] pub struct RawVec { ptr: Unique, cap: usize, @@ -546,13 +546,6 @@ impl RawVec { mem::forget(self); output } - - /// This is a stupid name in the hopes that someone will find this in the - /// not too distant future and remove it with the rest of - /// #[unsafe_no_drop_flag] - pub fn unsafe_no_drop_flag_needs_drop(&self) -> bool { - self.cap != mem::POST_DROP_USIZE - } } impl Drop for RawVec { @@ -560,7 +553,7 @@ impl Drop for RawVec { /// Frees the memory owned by the RawVec *without* trying to Drop its contents. fn drop(&mut self) { let elem_size = mem::size_of::(); - if elem_size != 0 && self.cap != 0 && self.unsafe_no_drop_flag_needs_drop() { + if elem_size != 0 && self.cap != 0 { let align = mem::align_of::(); let num_bytes = elem_size * self.cap; diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index 2beb652aa0..e0f635f195 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -10,90 +10,138 @@ #![allow(deprecated)] -//! Unsynchronized reference-counted boxes (the `Rc` type) which are usable -//! only within a single thread. +//! Single-threaded reference-counting pointers. //! -//! The `Rc` type provides shared ownership of an immutable value. -//! Destruction is deterministic, and will occur as soon as the last owner is -//! gone. It is marked as non-sendable because it avoids the overhead of atomic -//! reference counting. +//! The type [`Rc`][rc] provides shared ownership of a value, allocated +//! in the heap. Invoking [`clone`][clone] on `Rc` produces a new pointer +//! to the same value in the heap. When the last `Rc` pointer to a given +//! value is destroyed, the pointed-to value is also destroyed. //! -//! The `downgrade` method can be used to create a non-owning `Weak` pointer -//! to the box. A `Weak` pointer can be upgraded to an `Rc` pointer, but -//! will return `None` if the value has already been dropped. +//! Shared pointers in Rust disallow mutation by default, and `Rc` is no +//! exception. If you need to mutate through an `Rc`, use [`Cell`][cell] or +//! [`RefCell`][refcell]. //! -//! For example, a tree with parent pointers can be represented by putting the -//! nodes behind strong `Rc` pointers, and then storing the parent pointers -//! as `Weak` pointers. +//! `Rc` uses non-atomic reference counting. This means that overhead is very +//! low, but an `Rc` cannot be sent between threads, and consequently `Rc` +//! does not implement [`Send`][send]. As a result, the Rust compiler +//! will check *at compile time* that you are not sending `Rc`s between +//! threads. If you need multi-threaded, atomic reference counting, use +//! [`sync::Arc`][arc]. +//! +//! The [`downgrade`][downgrade] method can be used to create a non-owning +//! [`Weak`][weak] pointer. A `Weak` pointer can be [`upgrade`][upgrade]d +//! to an `Rc`, but this will return [`None`][option] if the value has +//! already been dropped. +//! +//! A cycle between `Rc` pointers will never be deallocated. For this reason, +//! `Weak` is used to break cycles. For example, a tree could have strong +//! `Rc` pointers from parent nodes to children, and `Weak` pointers from +//! children back to their parents. +//! +//! `Rc` automatically dereferences to `T` (via the [`Deref`][deref] trait), +//! so you can call `T`'s methods on a value of type `Rc`. To avoid name +//! clashes with `T`'s methods, the methods of `Rc` itself are [associated +//! functions][assoc], called using function-like syntax: +//! +//! ``` +//! 
# use std::rc::Rc; +//! # let my_rc = Rc::new(()); +//! Rc::downgrade(&my_rc); +//! ``` +//! +//! `Weak` does not auto-dereference to `T`, because the value may have +//! already been destroyed. +//! +//! [rc]: struct.Rc.html +//! [weak]: struct.Weak.html +//! [clone]: ../../std/clone/trait.Clone.html#tymethod.clone +//! [cell]: ../../std/cell/struct.Cell.html +//! [refcell]: ../../std/cell/struct.RefCell.html +//! [send]: ../../std/marker/trait.Send.html +//! [arc]: ../../std/sync/struct.Arc.html +//! [deref]: ../../std/ops/trait.Deref.html +//! [downgrade]: struct.Rc.html#method.downgrade +//! [upgrade]: struct.Weak.html#method.upgrade +//! [option]: ../../std/option/enum.Option.html +//! [assoc]: ../../book/method-syntax.html#associated-functions //! //! # Examples //! //! Consider a scenario where a set of `Gadget`s are owned by a given `Owner`. //! We want to have our `Gadget`s point to their `Owner`. We can't do this with //! unique ownership, because more than one gadget may belong to the same -//! `Owner`. `Rc` allows us to share an `Owner` between multiple `Gadget`s, +//! `Owner`. `Rc` allows us to share an `Owner` between multiple `Gadget`s, //! and have the `Owner` remain allocated as long as any `Gadget` points at it. //! -//! ```rust +//! ``` //! use std::rc::Rc; //! //! struct Owner { -//! name: String +//! name: String, //! // ...other fields //! } //! //! struct Gadget { //! id: i32, -//! owner: Rc +//! owner: Rc, //! // ...other fields //! } //! //! fn main() { -//! // Create a reference counted Owner. -//! let gadget_owner : Rc = Rc::new( -//! Owner { name: String::from("Gadget Man") } +//! // Create a reference-counted `Owner`. +//! let gadget_owner: Rc = Rc::new( +//! Owner { +//! name: "Gadget Man".to_string(), +//! } //! ); //! -//! // Create Gadgets belonging to gadget_owner. To increment the reference -//! // count we clone the `Rc` object. -//! let gadget1 = Gadget { id: 1, owner: gadget_owner.clone() }; -//! let gadget2 = Gadget { id: 2, owner: gadget_owner.clone() }; +//! // Create `Gadget`s belonging to `gadget_owner`. Cloning the `Rc` +//! // value gives us a new pointer to the same `Owner` value, incrementing +//! // the reference count in the process. +//! let gadget1 = Gadget { +//! id: 1, +//! owner: gadget_owner.clone(), +//! }; +//! let gadget2 = Gadget { +//! id: 2, +//! owner: gadget_owner.clone(), +//! }; //! +//! // Dispose of our local variable `gadget_owner`. //! drop(gadget_owner); //! -//! // Despite dropping gadget_owner, we're still able to print out the name -//! // of the Owner of the Gadgets. This is because we've only dropped the -//! // reference count object, not the Owner it wraps. As long as there are -//! // other `Rc` objects pointing at the same Owner, it will remain -//! // allocated. Notice that the `Rc` wrapper around Gadget.owner gets -//! // automatically dereferenced for us. +//! // Despite dropping `gadget_owner`, we're still able to print out the name +//! // of the `Owner` of the `Gadget`s. This is because we've only dropped a +//! // single `Rc`, not the `Owner` it points to. As long as there are +//! // other `Rc` values pointing at the same `Owner`, it will remain +//! // allocated. The field projection `gadget1.owner.name` works because +//! // `Rc` automatically dereferences to `Owner`. //! println!("Gadget {} owned by {}", gadget1.id, gadget1.owner.name); //! println!("Gadget {} owned by {}", gadget2.id, gadget2.owner.name); //! -//! // At the end of the method, gadget1 and gadget2 get destroyed, and with -//! 
// them the last counted references to our Owner. Gadget Man now gets -//! // destroyed as well. +//! // At the end of the function, `gadget1` and `gadget2` are destroyed, and +//! // with them the last counted references to our `Owner`. Gadget Man now +//! // gets destroyed as well. //! } //! ``` //! //! If our requirements change, and we also need to be able to traverse from -//! Owner → Gadget, we will run into problems: an `Rc` pointer from Owner -//! → Gadget introduces a cycle between the objects. This means that their -//! reference counts can never reach 0, and the objects will remain allocated: a -//! memory leak. In order to get around this, we can use `Weak` pointers. -//! These pointers don't contribute to the total count. +//! `Owner` to `Gadget`, we will run into problems. An `Rc` pointer from `Owner` +//! to `Gadget` introduces a cycle between the values. This means that their +//! reference counts can never reach 0, and the values will remain allocated +//! forever: a memory leak. In order to get around this, we can use `Weak` +//! pointers. //! //! Rust actually makes it somewhat difficult to produce this loop in the first -//! place: in order to end up with two objects that point at each other, one of -//! them needs to be mutable. This is problematic because `Rc` enforces -//! memory safety by only giving out shared references to the object it wraps, +//! place. In order to end up with two values that point at each other, one of +//! them needs to be mutable. This is difficult because `Rc` enforces +//! memory safety by only giving out shared references to the value it wraps, //! and these don't allow direct mutation. We need to wrap the part of the -//! object we wish to mutate in a `RefCell`, which provides *interior +//! value we wish to mutate in a [`RefCell`][refcell], which provides *interior //! mutability*: a method to achieve mutability through a shared reference. -//! `RefCell` enforces Rust's borrowing rules at runtime. Read the `Cell` -//! documentation for more details on interior mutability. +//! `RefCell` enforces Rust's borrowing rules at runtime. //! -//! ```rust +//! ``` //! use std::rc::Rc; //! use std::rc::Weak; //! use std::cell::RefCell; @@ -111,41 +159,58 @@ //! } //! //! fn main() { -//! // Create a reference counted Owner. Note the fact that we've put the -//! // Owner's vector of Gadgets inside a RefCell so that we can mutate it -//! // through a shared reference. -//! let gadget_owner : Rc = Rc::new( +//! // Create a reference-counted `Owner`. Note that we've put the `Owner`'s +//! // vector of `Gadget`s inside a `RefCell` so that we can mutate it through +//! // a shared reference. +//! let gadget_owner: Rc = Rc::new( //! Owner { //! name: "Gadget Man".to_string(), -//! gadgets: RefCell::new(Vec::new()), +//! gadgets: RefCell::new(vec![]), //! } //! ); //! -//! // Create Gadgets belonging to gadget_owner as before. -//! let gadget1 = Rc::new(Gadget{id: 1, owner: gadget_owner.clone()}); -//! let gadget2 = Rc::new(Gadget{id: 2, owner: gadget_owner.clone()}); +//! // Create `Gadget`s belonging to `gadget_owner`, as before. +//! let gadget1 = Rc::new( +//! Gadget { +//! id: 1, +//! owner: gadget_owner.clone(), +//! } +//! ); +//! let gadget2 = Rc::new( +//! Gadget { +//! id: 2, +//! owner: gadget_owner.clone(), +//! } +//! ); +//! +//! // Add the `Gadget`s to their `Owner`. +//! { +//! let mut gadgets = gadget_owner.gadgets.borrow_mut(); +//! gadgets.push(Rc::downgrade(&gadget1)); +//! gadgets.push(Rc::downgrade(&gadget2)); //! -//! 
// Add the Gadgets to their Owner. To do this we mutably borrow from -//! // the RefCell holding the Owner's Gadgets. -//! gadget_owner.gadgets.borrow_mut().push(Rc::downgrade(&gadget1)); -//! gadget_owner.gadgets.borrow_mut().push(Rc::downgrade(&gadget2)); +//! // `RefCell` dynamic borrow ends here. +//! } //! -//! // Iterate over our Gadgets, printing their details out -//! for gadget_opt in gadget_owner.gadgets.borrow().iter() { +//! // Iterate over our `Gadget`s, printing their details out. +//! for gadget_weak in gadget_owner.gadgets.borrow().iter() { //! -//! // gadget_opt is a Weak. Since weak pointers can't guarantee -//! // that their object is still allocated, we need to call upgrade() -//! // on them to turn them into a strong reference. This returns an -//! // Option, which contains a reference to our object if it still -//! // exists. -//! let gadget = gadget_opt.upgrade().unwrap(); +//! // `gadget_weak` is a `Weak`. Since `Weak` pointers can't +//! // guarantee the value is still allocated, we need to call +//! // `upgrade`, which returns an `Option>`. +//! // +//! // In this case we know the value still exists, so we simply +//! // `unwrap` the `Option`. In a more complicated program, you might +//! // need graceful error handling for a `None` result. +//! +//! let gadget = gadget_weak.upgrade().unwrap(); //! println!("Gadget {} owned by {}", gadget.id, gadget.owner.name); //! } //! -//! // At the end of the method, gadget_owner, gadget1 and gadget2 get -//! // destroyed. There are now no strong (`Rc`) references to the gadgets. -//! // Once they get destroyed, the Gadgets get destroyed. This zeroes the -//! // reference count on Gadget Man, they get destroyed as well. +//! // At the end of the function, `gadget_owner`, `gadget1`, and `gadget2` +//! // are destroyed. There are now no strong (`Rc`) pointers to the +//! // gadgets, so they are destroyed. This zeroes the reference count on +//! // Gadget Man, so he gets destroyed as well. //! } //! ``` @@ -179,10 +244,15 @@ struct RcBox { } -/// A reference-counted pointer type over an immutable value. +/// A single-threaded reference-counting pointer. +/// +/// See the [module-level documentation](./index.html) for more details. /// -/// See the [module level documentation](./index.html) for more details. -#[unsafe_no_drop_flag] +/// The inherent methods of `Rc` are all associated functions, which means +/// that you have to call them as e.g. `Rc::get_mut(&value)` instead of +/// `value.get_mut()`. This avoids conflicts with methods of the inner +/// type `T`. +#[cfg_attr(stage0, unsafe_no_drop_flag)] #[stable(feature = "rust1", since = "1.0.0")] pub struct Rc { ptr: Shared>, @@ -223,9 +293,9 @@ impl Rc { } } - /// Unwraps the contained value if the `Rc` has exactly one strong reference. + /// Returns the contained value, if the `Rc` has exactly one strong reference. /// - /// Otherwise, an `Err` is returned with the same `Rc`. + /// Otherwise, an `Err` is returned with the same `Rc` that was passed in. /// /// This will succeed even if there are outstanding weak references. /// @@ -239,7 +309,7 @@ impl Rc { /// /// let x = Rc::new(4); /// let _y = x.clone(); - /// assert_eq!(Rc::try_unwrap(x), Err(Rc::new(4))); + /// assert_eq!(*Rc::try_unwrap(x).unwrap_err(), 4); /// ``` #[inline] #[stable(feature = "rc_unique", since = "1.4.0")] @@ -262,7 +332,24 @@ impl Rc { } } - /// Checks if `Rc::try_unwrap` would return `Ok`. + /// Checks whether `Rc::try_unwrap` would return `Ok`. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(rc_would_unwrap)] + /// + /// use std::rc::Rc; + /// + /// let x = Rc::new(3); + /// assert!(Rc::would_unwrap(&x)); + /// assert_eq!(Rc::try_unwrap(x), Ok(3)); + /// + /// let x = Rc::new(4); + /// let _y = x.clone(); + /// assert!(!Rc::would_unwrap(&x)); + /// assert_eq!(*Rc::try_unwrap(x).unwrap_err(), 4); + /// ``` #[unstable(feature = "rc_would_unwrap", reason = "just added for niche usecase", issue = "28356")] @@ -272,7 +359,9 @@ impl Rc { } impl Rc { - /// Creates a new `Weak` reference from this value. + /// Creates a new [`Weak`][weak] pointer to this value. + /// + /// [weak]: struct.Weak.html /// /// # Examples /// @@ -289,7 +378,22 @@ impl Rc { Weak { ptr: this.ptr } } - /// Get the number of weak references to this value. + /// Gets the number of [`Weak`][weak] pointers to this value. + /// + /// [weak]: struct.Weak.html + /// + /// # Examples + /// + /// ``` + /// #![feature(rc_counts)] + /// + /// use std::rc::Rc; + /// + /// let five = Rc::new(5); + /// let _weak_five = Rc::downgrade(&five); + /// + /// assert_eq!(1, Rc::weak_count(&five)); + /// ``` #[inline] #[unstable(feature = "rc_counts", reason = "not clearly useful", issue = "28356")] @@ -297,7 +401,20 @@ impl Rc { this.weak() - 1 } - /// Get the number of strong references to this value. + /// Gets the number of strong (`Rc`) pointers to this value. + /// + /// # Examples + /// + /// ``` + /// #![feature(rc_counts)] + /// + /// use std::rc::Rc; + /// + /// let five = Rc::new(5); + /// let _also_five = five.clone(); + /// + /// assert_eq!(2, Rc::strong_count(&five)); + /// ``` #[inline] #[unstable(feature = "rc_counts", reason = "not clearly useful", issue = "28356")] @@ -305,8 +422,10 @@ impl Rc { this.strong() } - /// Returns true if there are no other `Rc` or `Weak` values that share - /// the same inner value. + /// Returns true if there are no other `Rc` or [`Weak`][weak] pointers to + /// this inner value. + /// + /// [weak]: struct.Weak.html /// /// # Examples /// @@ -326,10 +445,19 @@ impl Rc { Rc::weak_count(this) == 0 && Rc::strong_count(this) == 1 } - /// Returns a mutable reference to the contained value if the `Rc` has - /// one strong reference and no weak references. + /// Returns a mutable reference to the inner value, if there are + /// no other `Rc` or [`Weak`][weak] pointers to the same value. /// - /// Returns `None` if the `Rc` is not unique. + /// Returns [`None`][option] otherwise, because it is not safe to + /// mutate a shared value. + /// + /// See also [`make_mut`][make_mut], which will [`clone`][clone] + /// the inner value when it's shared. + /// + /// [weak]: struct.Weak.html + /// [option]: ../../std/option/enum.Option.html + /// [make_mut]: struct.Rc.html#method.make_mut + /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone /// /// # Examples /// @@ -353,14 +481,47 @@ impl Rc { None } } + + #[inline] + #[unstable(feature = "ptr_eq", + reason = "newly added", + issue = "36497")] + /// Returns true if the two `Rc`s point to the same value (not + /// just values that compare as equal). 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(ptr_eq)] + /// + /// use std::rc::Rc; + /// + /// let five = Rc::new(5); + /// let same_five = five.clone(); + /// let other_five = Rc::new(5); + /// + /// assert!(Rc::ptr_eq(&five, &same_five)); + /// assert!(!Rc::ptr_eq(&five, &other_five)); + /// ``` + pub fn ptr_eq(this: &Self, other: &Self) -> bool { + let this_ptr: *const RcBox = *this.ptr; + let other_ptr: *const RcBox = *other.ptr; + this_ptr == other_ptr + } } impl Rc { - /// Make a mutable reference into the given `Rc` by cloning the inner - /// data if the `Rc` doesn't have one strong reference and no weak - /// references. + /// Makes a mutable reference into the given `Rc`. + /// + /// If there are other `Rc` or [`Weak`][weak] pointers to the same value, + /// then `make_mut` will invoke [`clone`][clone] on the inner value to + /// ensure unique ownership. This is also referred to as clone-on-write. + /// + /// See also [`get_mut`][get_mut], which will fail rather than cloning. /// - /// This is also referred to as a copy-on-write. + /// [weak]: struct.Weak.html + /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone + /// [get_mut]: struct.Rc.html#method.get_mut /// /// # Examples /// @@ -369,16 +530,15 @@ impl Rc { /// /// let mut data = Rc::new(5); /// - /// *Rc::make_mut(&mut data) += 1; // Won't clone anything - /// let mut other_data = data.clone(); // Won't clone inner data - /// *Rc::make_mut(&mut data) += 1; // Clones inner data - /// *Rc::make_mut(&mut data) += 1; // Won't clone anything - /// *Rc::make_mut(&mut other_data) *= 2; // Won't clone anything + /// *Rc::make_mut(&mut data) += 1; // Won't clone anything + /// let mut other_data = data.clone(); // Won't clone inner data + /// *Rc::make_mut(&mut data) += 1; // Clones inner data + /// *Rc::make_mut(&mut data) += 1; // Won't clone anything + /// *Rc::make_mut(&mut other_data) *= 2; // Won't clone anything /// - /// // Note: data and other_data now point to different numbers + /// // Now `data` and `other_data` point to different values. /// assert_eq!(*data, 8); /// assert_eq!(*other_data, 12); - /// /// ``` #[inline] #[stable(feature = "rc_unique", since = "1.4.0")] @@ -420,50 +580,47 @@ impl Deref for Rc { #[stable(feature = "rust1", since = "1.0.0")] impl Drop for Rc { - /// Drops the `Rc`. + /// Drops the `Rc`. /// /// This will decrement the strong reference count. If the strong reference - /// count becomes zero and the only other references are `Weak` ones, - /// `drop`s the inner value. + /// count reaches zero then the only other references (if any) are `Weak`, + /// so we `drop` the inner value. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// - /// { - /// let five = Rc::new(5); - /// - /// // stuff + /// struct Foo; /// - /// drop(five); // explicit drop + /// impl Drop for Foo { + /// fn drop(&mut self) { + /// println!("dropped!"); + /// } /// } - /// { - /// let five = Rc::new(5); /// - /// // stuff + /// let foo = Rc::new(Foo); + /// let foo2 = foo.clone(); /// - /// } // implicit drop + /// drop(foo); // Doesn't print anything + /// drop(foo2); // Prints "dropped!" 
/// ``` #[unsafe_destructor_blind_to_params] fn drop(&mut self) { unsafe { let ptr = *self.ptr; - let thin = ptr as *const (); - if thin as usize != mem::POST_DROP_USIZE { - self.dec_strong(); - if self.strong() == 0 { - // destroy the contained object - ptr::drop_in_place(&mut (*ptr).value); + self.dec_strong(); + if self.strong() == 0 { + // destroy the contained object + ptr::drop_in_place(&mut (*ptr).value); - // remove the implicit "strong weak" pointer now that we've - // destroyed the contents. - self.dec_weak(); + // remove the implicit "strong weak" pointer now that we've + // destroyed the contents. + self.dec_weak(); - if self.weak() == 0 { - deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) - } + if self.weak() == 0 { + deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) } } } @@ -472,10 +629,10 @@ impl Drop for Rc { #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Rc { - /// Makes a clone of the `Rc`. + /// Makes a clone of the `Rc` pointer. /// - /// When you clone an `Rc`, it will create another pointer to the data and - /// increase the strong reference counter. + /// This creates another pointer to the same inner value, increasing the + /// strong reference count. /// /// # Examples /// @@ -503,6 +660,7 @@ impl Default for Rc { /// use std::rc::Rc; /// /// let x: Rc = Default::default(); + /// assert_eq!(*x, 0); /// ``` #[inline] fn default() -> Rc { @@ -512,9 +670,9 @@ impl Default for Rc { #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq for Rc { - /// Equality for two `Rc`s. + /// Equality for two `Rc`s. /// - /// Two `Rc`s are equal if their inner value are equal. + /// Two `Rc`s are equal if their inner values are equal. /// /// # Examples /// @@ -523,16 +681,16 @@ impl PartialEq for Rc { /// /// let five = Rc::new(5); /// - /// five == Rc::new(5); + /// assert!(five == Rc::new(5)); /// ``` #[inline(always)] fn eq(&self, other: &Rc) -> bool { **self == **other } - /// Inequality for two `Rc`s. + /// Inequality for two `Rc`s. /// - /// Two `Rc`s are unequal if their inner value are unequal. + /// Two `Rc`s are unequal if their inner values are unequal. /// /// # Examples /// @@ -541,7 +699,7 @@ impl PartialEq for Rc { /// /// let five = Rc::new(5); /// - /// five != Rc::new(5); + /// assert!(five != Rc::new(6)); /// ``` #[inline(always)] fn ne(&self, other: &Rc) -> bool { @@ -554,7 +712,7 @@ impl Eq for Rc {} #[stable(feature = "rust1", since = "1.0.0")] impl PartialOrd for Rc { - /// Partial comparison for two `Rc`s. + /// Partial comparison for two `Rc`s. /// /// The two are compared by calling `partial_cmp()` on their inner values. /// @@ -562,17 +720,18 @@ impl PartialOrd for Rc { /// /// ``` /// use std::rc::Rc; + /// use std::cmp::Ordering; /// /// let five = Rc::new(5); /// - /// five.partial_cmp(&Rc::new(5)); + /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Rc::new(6))); /// ``` #[inline(always)] fn partial_cmp(&self, other: &Rc) -> Option { (**self).partial_cmp(&**other) } - /// Less-than comparison for two `Rc`s. + /// Less-than comparison for two `Rc`s. /// /// The two are compared by calling `<` on their inner values. /// @@ -583,14 +742,14 @@ impl PartialOrd for Rc { /// /// let five = Rc::new(5); /// - /// five < Rc::new(5); + /// assert!(five < Rc::new(6)); /// ``` #[inline(always)] fn lt(&self, other: &Rc) -> bool { **self < **other } - /// 'Less-than or equal to' comparison for two `Rc`s. + /// 'Less than or equal to' comparison for two `Rc`s. 
/// /// The two are compared by calling `<=` on their inner values. /// @@ -601,14 +760,14 @@ impl PartialOrd for Rc { /// /// let five = Rc::new(5); /// - /// five <= Rc::new(5); + /// assert!(five <= Rc::new(5)); /// ``` #[inline(always)] fn le(&self, other: &Rc) -> bool { **self <= **other } - /// Greater-than comparison for two `Rc`s. + /// Greater-than comparison for two `Rc`s. /// /// The two are compared by calling `>` on their inner values. /// @@ -619,14 +778,14 @@ impl PartialOrd for Rc { /// /// let five = Rc::new(5); /// - /// five > Rc::new(5); + /// assert!(five > Rc::new(4)); /// ``` #[inline(always)] fn gt(&self, other: &Rc) -> bool { **self > **other } - /// 'Greater-than or equal to' comparison for two `Rc`s. + /// 'Greater than or equal to' comparison for two `Rc`s. /// /// The two are compared by calling `>=` on their inner values. /// @@ -637,7 +796,7 @@ impl PartialOrd for Rc { /// /// let five = Rc::new(5); /// - /// five >= Rc::new(5); + /// assert!(five >= Rc::new(5)); /// ``` #[inline(always)] fn ge(&self, other: &Rc) -> bool { @@ -647,7 +806,7 @@ impl PartialOrd for Rc { #[stable(feature = "rust1", since = "1.0.0")] impl Ord for Rc { - /// Comparison for two `Rc`s. + /// Comparison for two `Rc`s. /// /// The two are compared by calling `cmp()` on their inner values. /// @@ -655,10 +814,11 @@ impl Ord for Rc { /// /// ``` /// use std::rc::Rc; + /// use std::cmp::Ordering; /// /// let five = Rc::new(5); /// - /// five.partial_cmp(&Rc::new(5)); + /// assert_eq!(Ordering::Less, five.cmp(&Rc::new(6))); /// ``` #[inline] fn cmp(&self, other: &Rc) -> Ordering { @@ -701,13 +861,19 @@ impl From for Rc { } } -/// A weak version of `Rc`. +/// A weak version of [`Rc`][rc]. +/// +/// `Weak` pointers do not count towards determining if the inner value +/// should be dropped. +/// +/// The typical way to obtain a `Weak` pointer is to call +/// [`Rc::downgrade`][downgrade]. /// -/// Weak references do not count when determining if the inner value should be -/// dropped. +/// See the [module-level documentation](./index.html) for more details. /// -/// See the [module level documentation](./index.html) for more. -#[unsafe_no_drop_flag] +/// [rc]: struct.Rc.html +/// [downgrade]: struct.Rc.html#method.downgrade +#[cfg_attr(stage0, unsafe_no_drop_flag)] #[stable(feature = "rc_weak", since = "1.4.0")] pub struct Weak { ptr: Shared>, @@ -722,10 +888,14 @@ impl !marker::Sync for Weak {} impl, U: ?Sized> CoerceUnsized> for Weak {} impl Weak { - /// Constructs a new `Weak` without an accompanying instance of T. + /// Constructs a new `Weak`, without an accompanying instance of `T`. /// - /// This allocates memory for T, but does not initialize it. Calling - /// Weak::upgrade() on the return value always gives None. + /// This allocates memory for `T`, but does not initialize it. Calling + /// [`upgrade`][upgrade] on the return value always gives + /// [`None`][option]. + /// + /// [upgrade]: struct.Weak.html#method.upgrade + /// [option]: ../../std/option/enum.Option.html /// /// # Examples /// @@ -733,6 +903,7 @@ impl Weak { /// use std::rc::Weak; /// /// let empty: Weak = Weak::new(); + /// assert!(empty.upgrade().is_none()); /// ``` #[stable(feature = "downgraded_weak", since = "1.10.0")] pub fn new() -> Weak { @@ -749,12 +920,13 @@ impl Weak { } impl Weak { - /// Upgrades a weak reference to a strong reference. + /// Upgrades the `Weak` pointer to an [`Rc`][rc], if possible. /// - /// Upgrades the `Weak` reference to an `Rc`, if possible. 
+ /// Returns [`None`][option] if the strong count has reached zero and the + /// inner value was destroyed. /// - /// Returns `None` if there were no strong references and the data was - /// destroyed. + /// [rc]: struct.Rc.html + /// [option]: ../../std/option/enum.Option.html /// /// # Examples /// @@ -766,6 +938,13 @@ impl Weak { /// let weak_five = Rc::downgrade(&five); /// /// let strong_five: Option> = weak_five.upgrade(); + /// assert!(strong_five.is_some()); + /// + /// // Destroy all strong pointers. + /// drop(strong_five); + /// drop(five); + /// + /// assert!(weak_five.upgrade().is_none()); /// ``` #[stable(feature = "rc_weak", since = "1.4.0")] pub fn upgrade(&self) -> Option> { @@ -780,7 +959,7 @@ impl Weak { #[stable(feature = "rc_weak", since = "1.4.0")] impl Drop for Weak { - /// Drops the `Weak`. + /// Drops the `Weak` pointer. /// /// This will decrement the weak reference count. /// @@ -789,34 +968,32 @@ impl Drop for Weak { /// ``` /// use std::rc::Rc; /// - /// { - /// let five = Rc::new(5); - /// let weak_five = Rc::downgrade(&five); + /// struct Foo; /// - /// // stuff - /// - /// drop(weak_five); // explicit drop + /// impl Drop for Foo { + /// fn drop(&mut self) { + /// println!("dropped!"); + /// } /// } - /// { - /// let five = Rc::new(5); - /// let weak_five = Rc::downgrade(&five); /// - /// // stuff + /// let foo = Rc::new(Foo); + /// let weak_foo = Rc::downgrade(&foo); + /// let other_weak_foo = weak_foo.clone(); + /// + /// drop(weak_foo); // Doesn't print anything + /// drop(foo); // Prints "dropped!" /// - /// } // implicit drop + /// assert!(other_weak_foo.upgrade().is_none()); /// ``` fn drop(&mut self) { unsafe { let ptr = *self.ptr; - let thin = ptr as *const (); - if thin as usize != mem::POST_DROP_USIZE { - self.dec_weak(); - // the weak count starts at 1, and will only go to zero if all - // the strong pointers have disappeared. - if self.weak() == 0 { - deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) - } + self.dec_weak(); + // the weak count starts at 1, and will only go to zero if all + // the strong pointers have disappeared. + if self.weak() == 0 { + deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) } } } @@ -824,9 +1001,10 @@ impl Drop for Weak { #[stable(feature = "rc_weak", since = "1.4.0")] impl Clone for Weak { - /// Makes a clone of the `Weak`. + /// Makes a clone of the `Weak` pointer. /// - /// This increases the weak reference count. + /// This creates another pointer to the same inner value, increasing the + /// weak reference count. /// /// # Examples /// @@ -853,6 +1031,23 @@ impl fmt::Debug for Weak { #[stable(feature = "downgraded_weak", since = "1.10.0")] impl Default for Weak { + /// Constructs a new `Weak`, without an accompanying instance of `T`. + /// + /// This allocates memory for `T`, but does not initialize it. Calling + /// [`upgrade`][upgrade] on the return value always gives + /// [`None`][option]. 
+ /// + /// [upgrade]: struct.Weak.html#method.upgrade + /// [option]: ../../std/option/enum.Option.html + /// + /// # Examples + /// + /// ``` + /// use std::rc::Weak; + /// + /// let empty: Weak = Default::default(); + /// assert!(empty.upgrade().is_none()); + /// ``` fn default() -> Weak { Weak::new() } @@ -1156,6 +1351,16 @@ mod tests { let foo: Weak = Weak::new(); assert!(foo.upgrade().is_none()); } + + #[test] + fn test_ptr_eq() { + let five = Rc::new(5); + let same_five = five.clone(); + let other_five = Rc::new(5); + + assert!(Rc::ptr_eq(&five, &same_five)); + assert!(!Rc::ptr_eq(&five, &other_five)); + } } #[stable(feature = "rust1", since = "1.0.0")] diff --git a/src/liballoc_jemalloc/build.rs b/src/liballoc_jemalloc/build.rs index dc1b8d6ea9..8b31c5a557 100644 --- a/src/liballoc_jemalloc/build.rs +++ b/src/liballoc_jemalloc/build.rs @@ -22,8 +22,8 @@ fn main() { println!("cargo:rustc-cfg=cargobuild"); println!("cargo:rerun-if-changed=build.rs"); - let target = env::var("TARGET").unwrap(); - let host = env::var("HOST").unwrap(); + let target = env::var("TARGET").expect("TARGET was not set"); + let host = env::var("HOST").expect("HOST was not set"); let build_dir = PathBuf::from(env::var_os("OUT_DIR").unwrap()); let src_dir = env::current_dir().unwrap(); @@ -140,7 +140,7 @@ fn main() { .current_dir(&build_dir) .arg("build_lib_static") .arg("-j") - .arg(env::var("NUM_JOBS").unwrap())); + .arg(env::var("NUM_JOBS").expect("NUM_JOBS was not set"))); if target.contains("windows") { println!("cargo:rustc-link-lib=static=jemalloc"); diff --git a/src/liballoc_jemalloc/lib.rs b/src/liballoc_jemalloc/lib.rs index 347e97e6ff..5bbf1c35e0 100644 --- a/src/liballoc_jemalloc/lib.rs +++ b/src/liballoc_jemalloc/lib.rs @@ -77,7 +77,9 @@ const MIN_ALIGN: usize = 8; #[cfg(all(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64", - target_arch = "powerpc64")))] + target_arch = "powerpc64", + target_arch = "mips64", + target_arch = "s390x")))] const MIN_ALIGN: usize = 16; // MALLOCX_ALIGN(a) macro diff --git a/src/liballoc_system/lib.rs b/src/liballoc_system/lib.rs index 9eade937bf..01407d1acd 100644 --- a/src/liballoc_system/lib.rs +++ b/src/liballoc_system/lib.rs @@ -32,7 +32,9 @@ target_arch = "asmjs")))] const MIN_ALIGN: usize = 8; #[cfg(all(any(target_arch = "x86_64", - target_arch = "aarch64")))] + target_arch = "aarch64", + target_arch = "mips64", + target_arch = "s390x")))] const MIN_ALIGN: usize = 16; #[no_mangle] diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs index b299b786b3..4986c9850d 100644 --- a/src/libarena/lib.rs +++ b/src/libarena/lib.rs @@ -15,9 +15,8 @@ //! of individual objects while the arena itself is still alive. The benefit //! of an arena is very fast allocation; just a pointer bump. //! -//! This crate has two arenas implemented: `TypedArena`, which is a simpler -//! arena but can only hold objects of a single type, and `Arena`, which is a -//! more complex, slower arena which can hold objects of any type. +//! This crate implements `TypedArena`, a simple arena that can only hold +//! objects of a single type. #![crate_name = "arena"] #![unstable(feature = "rustc_private", issue = "27812")] @@ -51,7 +50,7 @@ use std::ptr; use alloc::heap; use alloc::raw_vec::RawVec; -/// A faster arena that can hold objects of only one type. +/// An arena that can hold objects of only one type. pub struct TypedArena { /// A pointer to the next object to be allocated. ptr: Cell<*mut T>, @@ -60,7 +59,7 @@ pub struct TypedArena { /// reached, a new chunk is allocated. 
end: Cell<*mut T>, - /// A vector arena segments. + /// A vector of arena chunks. chunks: RefCell>>, /// Marker indicating that dropping the arena causes its owned @@ -69,7 +68,7 @@ pub struct TypedArena { } struct TypedArenaChunk { - /// Pointer to the next arena segment. + /// The raw storage for the arena chunk. storage: RawVec, } @@ -117,26 +116,16 @@ impl TypedArenaChunk { const PAGE: usize = 4096; impl TypedArena { - /// Creates a new `TypedArena` with preallocated space for many objects. + /// Creates a new `TypedArena`. #[inline] pub fn new() -> TypedArena { - // Reserve at least one page. - let elem_size = cmp::max(1, mem::size_of::()); - TypedArena::with_capacity(PAGE / elem_size) - } - - /// Creates a new `TypedArena` with preallocated space for the given number of - /// objects. - #[inline] - pub fn with_capacity(capacity: usize) -> TypedArena { - unsafe { - let chunk = TypedArenaChunk::::new(cmp::max(1, capacity)); - TypedArena { - ptr: Cell::new(chunk.start()), - end: Cell::new(chunk.end()), - chunks: RefCell::new(vec![chunk]), - _own: PhantomData, - } + TypedArena { + // We set both `ptr` and `end` to 0 so that the first call to + // alloc() will trigger a grow(). + ptr: Cell::new(0 as *mut T), + end: Cell::new(0 as *mut T), + chunks: RefCell::new(vec![]), + _own: PhantomData, } } @@ -171,29 +160,39 @@ impl TypedArena { fn grow(&self) { unsafe { let mut chunks = self.chunks.borrow_mut(); - let prev_capacity = chunks.last().unwrap().storage.cap(); - let new_capacity = prev_capacity.checked_mul(2).unwrap(); - if chunks.last_mut().unwrap().storage.double_in_place() { - self.end.set(chunks.last().unwrap().end()); + let (chunk, new_capacity); + if let Some(last_chunk) = chunks.last_mut() { + if last_chunk.storage.double_in_place() { + self.end.set(last_chunk.end()); + return; + } else { + let prev_capacity = last_chunk.storage.cap(); + new_capacity = prev_capacity.checked_mul(2).unwrap(); + } } else { - let chunk = TypedArenaChunk::::new(new_capacity); - self.ptr.set(chunk.start()); - self.end.set(chunk.end()); - chunks.push(chunk); + let elem_size = cmp::max(1, mem::size_of::()); + new_capacity = cmp::max(1, PAGE / elem_size); } + chunk = TypedArenaChunk::::new(new_capacity); + self.ptr.set(chunk.start()); + self.end.set(chunk.end()); + chunks.push(chunk); } } + /// Clears the arena. Deallocates all but the longest chunk which may be reused. pub fn clear(&mut self) { unsafe { // Clear the last chunk, which is partially filled. let mut chunks_borrow = self.chunks.borrow_mut(); - let last_idx = chunks_borrow.len() - 1; - self.clear_last_chunk(&mut chunks_borrow[last_idx]); - // If `T` is ZST, code below has no effect. - for mut chunk in chunks_borrow.drain(..last_idx) { - let cap = chunk.storage.cap(); - chunk.destroy(cap); + if let Some(mut last_chunk) = chunks_borrow.pop() { + self.clear_last_chunk(&mut last_chunk); + // If `T` is ZST, code below has no effect. + for mut chunk in chunks_borrow.drain(..) { + let cap = chunk.storage.cap(); + chunk.destroy(cap); + } + chunks_borrow.push(last_chunk); } } } @@ -230,13 +229,14 @@ impl Drop for TypedArena { unsafe { // Determine how much was filled. let mut chunks_borrow = self.chunks.borrow_mut(); - let mut last_chunk = chunks_borrow.pop().unwrap(); - // Drop the contents of the last chunk. - self.clear_last_chunk(&mut last_chunk); - // The last chunk will be dropped. Destroy all other chunks. 
- for chunk in chunks_borrow.iter_mut() { - let cap = chunk.storage.cap(); - chunk.destroy(cap); + if let Some(mut last_chunk) = chunks_borrow.pop() { + // Drop the contents of the last chunk. + self.clear_last_chunk(&mut last_chunk); + // The last chunk will be dropped. Destroy all other chunks. + for chunk in chunks_borrow.iter_mut() { + let cap = chunk.storage.cap(); + chunk.destroy(cap); + } } // RawVec handles deallocation of `last_chunk` and `self.chunks`. } @@ -260,6 +260,12 @@ mod tests { z: i32, } + #[test] + pub fn test_unused() { + let arena: TypedArena = TypedArena::new(); + assert!(arena.chunks.borrow().is_empty()); + } + #[test] fn test_arena_alloc_nested() { struct Inner { diff --git a/src/libcollections/binary_heap.rs b/src/libcollections/binary_heap.rs index fe9b60c393..5f2401b236 100644 --- a/src/libcollections/binary_heap.rs +++ b/src/libcollections/binary_heap.rs @@ -151,8 +151,8 @@ #![allow(missing_docs)] #![stable(feature = "rust1", since = "1.0.0")] -use core::ops::{Drop, Deref, DerefMut}; -use core::iter::FromIterator; +use core::ops::{Deref, DerefMut}; +use core::iter::{FromIterator, FusedIterator}; use core::mem::swap; use core::mem::size_of; use core::ptr; @@ -263,6 +263,7 @@ impl Clone for BinaryHeap { #[stable(feature = "rust1", since = "1.0.0")] impl Default for BinaryHeap { + /// Creates an empty `BinaryHeap`. #[inline] fn default() -> BinaryHeap { BinaryHeap::new() @@ -534,6 +535,7 @@ impl BinaryHeap { /// /// ``` /// #![feature(binary_heap_extras)] + /// #![allow(deprecated)] /// /// use std::collections::BinaryHeap; /// let mut heap = BinaryHeap::new(); @@ -548,6 +550,7 @@ impl BinaryHeap { #[unstable(feature = "binary_heap_extras", reason = "needs to be audited", issue = "28147")] + #[rustc_deprecated(since = "1.13.0", reason = "use `peek_mut` instead")] pub fn push_pop(&mut self, mut item: T) -> T { match self.data.get_mut(0) { None => return item, @@ -574,6 +577,7 @@ impl BinaryHeap { /// /// ``` /// #![feature(binary_heap_extras)] + /// #![allow(deprecated)] /// /// use std::collections::BinaryHeap; /// let mut heap = BinaryHeap::new(); @@ -586,6 +590,7 @@ impl BinaryHeap { #[unstable(feature = "binary_heap_extras", reason = "needs to be audited", issue = "28147")] + #[rustc_deprecated(since = "1.13.0", reason = "use `peek_mut` instead")] pub fn replace(&mut self, mut item: T) -> Option { if !self.is_empty() { swap(&mut item, &mut self.data[0]); @@ -884,58 +889,61 @@ struct Hole<'a, T: 'a> { impl<'a, T> Hole<'a, T> { /// Create a new Hole at index `pos`. - fn new(data: &'a mut [T], pos: usize) -> Self { - unsafe { - let elt = ptr::read(&data[pos]); - Hole { - data: data, - elt: Some(elt), - pos: pos, - } + /// + /// Unsafe because pos must be within the data slice. + #[inline] + unsafe fn new(data: &'a mut [T], pos: usize) -> Self { + debug_assert!(pos < data.len()); + let elt = ptr::read(&data[pos]); + Hole { + data: data, + elt: Some(elt), + pos: pos, } } - #[inline(always)] + #[inline] fn pos(&self) -> usize { self.pos } /// Return a reference to the element removed - #[inline(always)] + #[inline] fn element(&self) -> &T { self.elt.as_ref().unwrap() } /// Return a reference to the element at `index`. /// - /// Panics if the index is out of bounds. - /// - /// Unsafe because index must not equal pos. - #[inline(always)] + /// Unsafe because index must be within the data slice and not equal to pos. 
+ #[inline] unsafe fn get(&self, index: usize) -> &T { debug_assert!(index != self.pos); - &self.data[index] + debug_assert!(index < self.data.len()); + self.data.get_unchecked(index) } /// Move hole to new location /// - /// Unsafe because index must not equal pos. - #[inline(always)] + /// Unsafe because index must be within the data slice and not equal to pos. + #[inline] unsafe fn move_to(&mut self, index: usize) { debug_assert!(index != self.pos); - let index_ptr: *const _ = &self.data[index]; - let hole_ptr = &mut self.data[self.pos]; + debug_assert!(index < self.data.len()); + let index_ptr: *const _ = self.data.get_unchecked(index); + let hole_ptr = self.data.get_unchecked_mut(self.pos); ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1); self.pos = index; } } impl<'a, T> Drop for Hole<'a, T> { + #[inline] fn drop(&mut self) { // fill the hole again unsafe { let pos = self.pos; - ptr::write(&mut self.data[pos], self.elt.take().unwrap()); + ptr::write(self.data.get_unchecked_mut(pos), self.elt.take().unwrap()); } } } @@ -980,6 +988,9 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Iter<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Iter<'a, T> {} + /// An iterator that moves out of a `BinaryHeap`. #[stable(feature = "rust1", since = "1.0.0")] #[derive(Clone)] @@ -1013,6 +1024,9 @@ impl DoubleEndedIterator for IntoIter { #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for IntoIter {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} + /// An iterator that drains a `BinaryHeap`. #[stable(feature = "drain", since = "1.6.0")] pub struct Drain<'a, T: 'a> { @@ -1045,6 +1059,9 @@ impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T: 'a> FusedIterator for Drain<'a, T> {} + #[stable(feature = "rust1", since = "1.0.0")] impl From> for BinaryHeap { fn from(vec: Vec) -> BinaryHeap { diff --git a/src/libcollections/borrow.rs b/src/libcollections/borrow.rs index 37dbeb4eae..700f88dc0f 100644 --- a/src/libcollections/borrow.rs +++ b/src/libcollections/borrow.rs @@ -12,14 +12,9 @@ #![stable(feature = "rust1", since = "1.0.0")] -use core::clone::Clone; -use core::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd}; -use core::convert::AsRef; -use core::default::Default; +use core::cmp::Ordering; use core::hash::{Hash, Hasher}; -use core::marker::Sized; use core::ops::Deref; -use core::option::Option; use fmt; @@ -254,6 +249,7 @@ impl<'a, B: ?Sized> Default for Cow<'a, B> where B: ToOwned, ::Owned: Default { + /// Creates an owned Cow<'a, B> with the default value for the contained owned value. fn default() -> Cow<'a, B> { Owned(::Owned::default()) } diff --git a/src/libcollections/btree/map.rs b/src/libcollections/btree/map.rs index a2e2ad37ac..36cb5a1fd9 100644 --- a/src/libcollections/btree/map.rs +++ b/src/libcollections/btree/map.rs @@ -11,7 +11,7 @@ use core::cmp::Ordering; use core::fmt::Debug; use core::hash::{Hash, Hasher}; -use core::iter::{FromIterator, Peekable}; +use core::iter::{FromIterator, Peekable, FusedIterator}; use core::marker::PhantomData; use core::ops::Index; use core::{fmt, intrinsics, mem, ptr}; @@ -56,8 +56,12 @@ use self::Entry::*; /// however, performance is excellent. 
/// /// It is a logic error for a key to be modified in such a way that the key's ordering relative to -/// any other key, as determined by the `Ord` trait, changes while it is in the map. This is -/// normally only possible through `Cell`, `RefCell`, global state, I/O, or unsafe code. +/// any other key, as determined by the [`Ord`] trait, changes while it is in the map. This is +/// normally only possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. +/// +/// [`Ord`]: ../../std/cmp/trait.Ord.html +/// [`Cell`]: ../../std/cell/struct.Cell.html +/// [`RefCell`]: ../../std/cell/struct.RefCell.html /// /// # Examples /// @@ -1147,6 +1151,9 @@ impl<'a, K: 'a, V: 'a> Iterator for Iter<'a, K, V> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, K, V> FusedIterator for Iter<'a, K, V> {} + impl<'a, K: 'a, V: 'a> DoubleEndedIterator for Iter<'a, K, V> { fn next_back(&mut self) -> Option<(&'a K, &'a V)> { if self.length == 0 { @@ -1216,6 +1223,9 @@ impl<'a, K: 'a, V: 'a> ExactSizeIterator for IterMut<'a, K, V> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, K, V> FusedIterator for IterMut<'a, K, V> {} + impl IntoIterator for BTreeMap { type Item = (K, V); type IntoIter = IntoIter; @@ -1338,6 +1348,9 @@ impl ExactSizeIterator for IntoIter { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} + impl<'a, K, V> Iterator for Keys<'a, K, V> { type Item = &'a K; @@ -1362,6 +1375,9 @@ impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, K, V> FusedIterator for Keys<'a, K, V> {} + impl<'a, K, V> Clone for Keys<'a, K, V> { fn clone(&self) -> Keys<'a, K, V> { Keys { inner: self.inner.clone() } @@ -1392,6 +1408,9 @@ impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, K, V> FusedIterator for Values<'a, K, V> {} + impl<'a, K, V> Clone for Values<'a, K, V> { fn clone(&self) -> Values<'a, K, V> { Values { inner: self.inner.clone() } @@ -1437,6 +1456,10 @@ impl<'a, K, V> ExactSizeIterator for ValuesMut<'a, K, V> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, K, V> FusedIterator for ValuesMut<'a, K, V> {} + + impl<'a, K, V> Range<'a, K, V> { unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) { let handle = self.front; @@ -1511,6 +1534,9 @@ impl<'a, K, V> Range<'a, K, V> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, K, V> FusedIterator for Range<'a, K, V> {} + impl<'a, K, V> Clone for Range<'a, K, V> { fn clone(&self) -> Range<'a, K, V> { Range { @@ -1574,6 +1600,9 @@ impl<'a, K, V> DoubleEndedIterator for RangeMut<'a, K, V> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, K, V> FusedIterator for RangeMut<'a, K, V> {} + impl<'a, K, V> RangeMut<'a, K, V> { unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a mut V) { let handle = ptr::read(&self.back); @@ -1638,6 +1667,7 @@ impl Hash for BTreeMap { } impl Default for BTreeMap { + /// Creates an empty `BTreeMap`. fn default() -> BTreeMap { BTreeMap::new() } @@ -1995,7 +2025,7 @@ impl<'a, K: Ord, V> VacantEntry<'a, K, V> { self.key } - /// Sets the value of the entry with the VacantEntry's key, + /// Sets the value of the entry with the `VacantEntry`'s key, /// and returns a mutable reference to it. 
/// /// # Examples @@ -2167,7 +2197,7 @@ impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> { self.handle.into_kv_mut().1 } - /// Sets the value of the entry with the OccupiedEntry's key, + /// Sets the value of the entry with the `OccupiedEntry`'s key, /// and returns the entry's old value. /// /// # Examples diff --git a/src/libcollections/btree/set.rs b/src/libcollections/btree/set.rs index 0f885bc295..fc2a7f8254 100644 --- a/src/libcollections/btree/set.rs +++ b/src/libcollections/btree/set.rs @@ -15,7 +15,7 @@ use core::cmp::Ordering::{self, Less, Greater, Equal}; use core::cmp::{min, max}; use core::fmt::Debug; use core::fmt; -use core::iter::{Peekable, FromIterator}; +use core::iter::{Peekable, FromIterator, FusedIterator}; use core::ops::{BitOr, BitAnd, BitXor, Sub}; use borrow::Borrow; @@ -674,6 +674,7 @@ impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BTreeSet { #[stable(feature = "rust1", since = "1.0.0")] impl Default for BTreeSet { + /// Makes an empty `BTreeSet` with a reasonable choice of B. fn default() -> BTreeSet { BTreeSet::new() } @@ -805,6 +806,8 @@ impl<'a, T> ExactSizeIterator for Iter<'a, T> { fn len(&self) -> usize { self.iter.len() } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Iter<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] impl Iterator for IntoIter { @@ -828,6 +831,8 @@ impl ExactSizeIterator for IntoIter { fn len(&self) -> usize { self.iter.len() } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} impl<'a, T> Clone for Range<'a, T> { fn clone(&self) -> Range<'a, T> { @@ -847,6 +852,9 @@ impl<'a, T> DoubleEndedIterator for Range<'a, T> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Range<'a, T> {} + /// Compare `x` and `y`, but return `short` if x is None and `long` if y is None fn cmp_opt(x: Option<&T>, y: Option<&T>, short: Ordering, long: Ordering) -> Ordering { match (x, y) { @@ -890,6 +898,9 @@ impl<'a, T: Ord> Iterator for Difference<'a, T> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T: Ord> FusedIterator for Difference<'a, T> {} + impl<'a, T> Clone for SymmetricDifference<'a, T> { fn clone(&self) -> SymmetricDifference<'a, T> { SymmetricDifference { @@ -920,6 +931,9 @@ impl<'a, T: Ord> Iterator for SymmetricDifference<'a, T> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T: Ord> FusedIterator for SymmetricDifference<'a, T> {} + impl<'a, T> Clone for Intersection<'a, T> { fn clone(&self) -> Intersection<'a, T> { Intersection { @@ -960,6 +974,9 @@ impl<'a, T: Ord> Iterator for Intersection<'a, T> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T: Ord> FusedIterator for Intersection<'a, T> {} + impl<'a, T> Clone for Union<'a, T> { fn clone(&self) -> Union<'a, T> { Union { @@ -991,3 +1008,6 @@ impl<'a, T: Ord> Iterator for Union<'a, T> { (max(a_len, b_len), Some(a_len + b_len)) } } + +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T: Ord> FusedIterator for Union<'a, T> {} diff --git a/src/libcollections/enum_set.rs b/src/libcollections/enum_set.rs index 0c66c0564c..2456a04e40 100644 --- a/src/libcollections/enum_set.rs +++ b/src/libcollections/enum_set.rs @@ -20,7 +20,7 @@ use core::marker; use core::fmt; -use core::iter::FromIterator; +use core::iter::{FromIterator, FusedIterator}; use core::ops::{Sub, BitOr, BitAnd, BitXor}; // FIXME(contentions): implement union family of methods? 
(general design may be @@ -266,6 +266,9 @@ impl Iterator for Iter { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Iter {} + impl FromIterator for EnumSet { fn from_iter>(iter: I) -> EnumSet { let mut ret = EnumSet::new(); diff --git a/src/libcollections/fmt.rs b/src/libcollections/fmt.rs index be0ef85d6b..beb3e6b3d4 100644 --- a/src/libcollections/fmt.rs +++ b/src/libcollections/fmt.rs @@ -165,9 +165,15 @@ //! provides some helper methods. //! //! Additionally, the return value of this function is `fmt::Result` which is a -//! typedef to `Result<(), std::io::Error>` (also known as `std::io::Result<()>`). -//! Formatting implementations should ensure that they return errors from `write!` -//! correctly (propagating errors upward). +//! type alias of `Result<(), std::fmt::Error>`. Formatting implementations +//! should ensure that they propagate errors from the `Formatter` (e.g., when +//! calling `write!`) however, they should never return errors spuriously. That +//! is, a formatting implementation must and may only return an error if the +//! passed-in `Formatter` returns an error. This is because, contrary to what +//! the function signature might suggest, string formatting is an infallible +//! operation. This function only returns a result because writing to the +//! underlying stream might fail and it must provide a way to propagate the fact +//! that an error has occurred back up the stack. //! //! An example of implementing the formatting traits would look //! like: @@ -530,7 +536,7 @@ use string; /// assert_eq!(s, "Hello, world!"); /// ``` /// -/// [format!]: ../macro.format!.html +/// [format!]: ../macro.format.html #[stable(feature = "rust1", since = "1.0.0")] pub fn format(args: Arguments) -> string::String { let mut output = string::String::new(); diff --git a/src/libcollections/lib.rs b/src/libcollections/lib.rs index 21387a1aa9..c5a9216934 100644 --- a/src/libcollections/lib.rs +++ b/src/libcollections/lib.rs @@ -37,6 +37,7 @@ #![feature(core_intrinsics)] #![feature(dropck_parametricity)] #![feature(fmt_internals)] +#![feature(fused)] #![feature(heap_api)] #![feature(inclusive_range)] #![feature(lang_items)] @@ -51,7 +52,7 @@ #![feature(step_by)] #![feature(unicode)] #![feature(unique)] -#![feature(unsafe_no_drop_flag)] +#![cfg_attr(stage0, feature(unsafe_no_drop_flag))] #![cfg_attr(test, feature(rand, test))] #![no_std] diff --git a/src/libcollections/linked_list.rs b/src/libcollections/linked_list.rs index 73aa67849f..690c4f4af3 100644 --- a/src/libcollections/linked_list.rs +++ b/src/libcollections/linked_list.rs @@ -19,7 +19,7 @@ use alloc::boxed::{Box, IntermediateBox}; use core::cmp::Ordering; use core::fmt; use core::hash::{Hasher, Hash}; -use core::iter::FromIterator; +use core::iter::{FromIterator, FusedIterator}; use core::marker::PhantomData; use core::mem; use core::ops::{BoxPlace, InPlace, Place, Placer}; @@ -164,6 +164,7 @@ impl LinkedList { #[stable(feature = "rust1", since = "1.0.0")] impl Default for LinkedList { + /// Creates an empty `LinkedList`. 
#[inline] fn default() -> Self { Self::new() @@ -754,6 +755,9 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Iter<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Iter<'a, T> {} + #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Iterator for IterMut<'a, T> { type Item = &'a mut T; @@ -798,6 +802,9 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for IterMut<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for IterMut<'a, T> {} + impl<'a, T> IterMut<'a, T> { /// Inserts the given element just after the element most recently returned by `.next()`. /// The inserted element does not appear in the iteration. @@ -905,6 +912,9 @@ impl DoubleEndedIterator for IntoIter { #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for IntoIter {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} + #[stable(feature = "rust1", since = "1.0.0")] impl FromIterator for LinkedList { fn from_iter>(iter: I) -> Self { @@ -1150,9 +1160,6 @@ unsafe impl<'a, T: Sync> Sync for IterMut<'a, T> {} #[cfg(test)] mod tests { - use std::clone::Clone; - use std::iter::{Iterator, IntoIterator, Extend}; - use std::option::Option::{self, Some, None}; use std::__rand::{thread_rng, Rng}; use std::thread; use std::vec::Vec; @@ -1310,7 +1317,6 @@ mod tests { #[test] fn test_26021() { - use std::iter::ExactSizeIterator; // There was a bug in split_off that failed to null out the RHS's head's prev ptr. // This caused the RHS's dtor to walk up into the LHS at drop and delete all of // its nodes. diff --git a/src/libcollections/range.rs b/src/libcollections/range.rs index 1badc72aed..d331ead2c5 100644 --- a/src/libcollections/range.rs +++ b/src/libcollections/range.rs @@ -14,7 +14,6 @@ //! Range syntax. -use core::option::Option::{self, None, Some}; use core::ops::{RangeFull, Range, RangeTo, RangeFrom}; /// **RangeArgument** is implemented by Rust's built-in range types, produced diff --git a/src/libcollections/slice.rs b/src/libcollections/slice.rs index 5cdf4ee88c..54dc7ec06d 100644 --- a/src/libcollections/slice.rs +++ b/src/libcollections/slice.rs @@ -1037,7 +1037,7 @@ impl [T] { self.sort_by(|a, b| a.cmp(b)) } - /// Sorts the slice, in place, using `key` to extract a key by which to + /// Sorts the slice, in place, using `f` to extract a key by which to /// order the sort by. /// /// This sort is stable and `O(n log n)` worst-case but allocates diff --git a/src/libcollections/str.rs b/src/libcollections/str.rs index 4c64019de0..6a6b450e51 100644 --- a/src/libcollections/str.rs +++ b/src/libcollections/str.rs @@ -23,6 +23,7 @@ use core::str as core_str; use core::str::pattern::Pattern; use core::str::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher}; use core::mem; +use core::iter::FusedIterator; use rustc_unicode::str::{UnicodeStr, Utf16Encoder}; use vec_deque::VecDeque; @@ -136,6 +137,9 @@ impl<'a> Iterator for EncodeUtf16<'a> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a> FusedIterator for EncodeUtf16<'a> {} + // Return the initial codepoint accumulator for the first byte. // The first byte is special, only want bottom 5 bits for width 2, 4 bits // for width 3, and 3 bits for width 4 @@ -1590,6 +1594,49 @@ impl str { result } + /// Replaces first N matches of a pattern with another string. 
+ /// + /// `replacen` creates a new [`String`], and copies the data from this string slice into it. + /// While doing so, it attempts to find matches of a pattern. If it finds any, it + /// replaces them with the replacement string slice at most `N` times. + /// + /// [`String`]: string/struct.String.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// # #![feature(str_replacen)] + /// let s = "foo foo 123 foo"; + /// assert_eq!("new new 123 foo", s.replacen("foo", "new", 2)); + /// assert_eq!("faa fao 123 foo", s.replacen('o', "a", 3)); + /// assert_eq!("foo foo new23 foo", s.replacen(char::is_numeric, "new", 1)); + /// ``` + /// + /// When the pattern doesn't match: + /// + /// ``` + /// # #![feature(str_replacen)] + /// let s = "this is old"; + /// assert_eq!(s, s.replacen("cookie monster", "little lamb", 10)); + /// ``` + #[unstable(feature = "str_replacen", + issue = "36436", + reason = "only need to replace first N matches")] + pub fn replacen<'a, P: Pattern<'a>>(&'a self, pat: P, to: &str, count: usize) -> String { + // Hope to reduce the times of re-allocation + let mut result = String::with_capacity(32); + let mut last_end = 0; + for (start, part) in self.match_indices(pat).take(count) { + result.push_str(unsafe { self.slice_unchecked(last_end, start) }); + result.push_str(to); + last_end = start + part.len(); + } + result.push_str(unsafe { self.slice_unchecked(last_end, self.len()) }); + result + } + /// Returns the lowercase equivalent of this string slice, as a new [`String`]. /// /// 'Lowercase' is defined according to the terms of the Unicode Derived Core Property diff --git a/src/libcollections/string.rs b/src/libcollections/string.rs index 1ba0319f2d..cff0308d4a 100644 --- a/src/libcollections/string.rs +++ b/src/libcollections/string.rs @@ -57,7 +57,7 @@ use core::fmt; use core::hash; -use core::iter::FromIterator; +use core::iter::{FromIterator, FusedIterator}; use core::mem; use core::ops::{self, Add, AddAssign, Index, IndexMut}; use core::ptr; @@ -132,7 +132,7 @@ use boxed::Box; /// [`OsString`]: ../../std/ffi/struct.OsString.html /// /// Indexing is intended to be a constant-time operation, but UTF-8 encoding -/// does not allow us to do this. Furtheremore, it's not clear what sort of +/// does not allow us to do this. Furthermore, it's not clear what sort of /// thing the index should return: a byte, a codepoint, or a grapheme cluster. /// The [`as_bytes()`] and [`chars()`] methods return iterators over the first /// two, respectively. @@ -1567,6 +1567,7 @@ impl_eq! { Cow<'a, str>, String } #[stable(feature = "rust1", since = "1.0.0")] impl Default for String { + /// Creates an empty `String`. 
#[inline] fn default() -> String { String::new() @@ -1975,3 +1976,6 @@ impl<'a> DoubleEndedIterator for Drain<'a> { self.iter.next_back() } } + +#[unstable(feature = "fused", issue = "35602")] +impl<'a> FusedIterator for Drain<'a> {} diff --git a/src/libcollections/vec.rs b/src/libcollections/vec.rs index 3aefcc7d4c..f8b4a92df2 100644 --- a/src/libcollections/vec.rs +++ b/src/libcollections/vec.rs @@ -68,7 +68,7 @@ use core::cmp::Ordering; use core::fmt; use core::hash::{self, Hash}; use core::intrinsics::{arith_offset, assume}; -use core::iter::FromIterator; +use core::iter::{FromIterator, FusedIterator}; use core::mem; use core::ops::{Index, IndexMut}; use core::ops; @@ -268,7 +268,7 @@ use super::range::RangeArgument; /// Vec does not currently guarantee the order in which elements are dropped /// (the order has changed in the past, and may change again). /// -#[unsafe_no_drop_flag] +#[cfg_attr(stage0, unsafe_no_drop_flag)] #[stable(feature = "rust1", since = "1.0.0")] pub struct Vec { buf: RawVec, @@ -1046,21 +1046,27 @@ impl Vec { self.reserve(n); unsafe { - let len = self.len(); - let mut ptr = self.as_mut_ptr().offset(len as isize); + let mut ptr = self.as_mut_ptr().offset(self.len() as isize); + // Use SetLenOnDrop to work around bug where compiler + // may not realize the store through `ptr` trough self.set_len() + // don't alias. + let mut local_len = SetLenOnDrop::new(&mut self.len); + // Write all elements except the last one - for i in 1..n { + for _ in 1..n { ptr::write(ptr, value.clone()); ptr = ptr.offset(1); // Increment the length in every step in case clone() panics - self.set_len(len + i); + local_len.increment_len(1); } if n > 0 { // We can write the last element directly without cloning needlessly ptr::write(ptr, value); - self.set_len(len + n); + local_len.increment_len(1); } + + // len set by scope guard } } @@ -1085,20 +1091,56 @@ impl Vec { pub fn extend_from_slice(&mut self, other: &[T]) { self.reserve(other.len()); - for i in 0..other.len() { + // Unsafe code so this can be optimised to a memcpy (or something + // similarly fast) when T is Copy. LLVM is easily confused, so any + // extra operations during the loop can prevent this optimisation. + unsafe { let len = self.len(); - - // Unsafe code so this can be optimised to a memcpy (or something - // similarly fast) when T is Copy. LLVM is easily confused, so any - // extra operations during the loop can prevent this optimisation. - unsafe { - ptr::write(self.get_unchecked_mut(len), other.get_unchecked(i).clone()); - self.set_len(len + 1); + let ptr = self.get_unchecked_mut(len) as *mut T; + // Use SetLenOnDrop to work around bug where compiler + // may not realize the store through `ptr` trough self.set_len() + // don't alias. + let mut local_len = SetLenOnDrop::new(&mut self.len); + + for i in 0..other.len() { + ptr::write(ptr.offset(i as isize), other.get_unchecked(i).clone()); + local_len.increment_len(1); } + + // len set by scope guard } } } +// Set the length of the vec when the `SetLenOnDrop` value goes out of scope. +// +// The idea is: The length field in SetLenOnDrop is a local variable +// that the optimizer will see does not alias with any stores through the Vec's data +// pointer. 
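A minimal, self-contained sketch of the drop-commit pattern this guard relies on (simplified: the real type defined just below also provides `new` and `increment_len` helpers), showing that the length written back on drop covers exactly the elements initialized before a panic:

```
use std::panic::{self, AssertUnwindSafe};

// Simplified drop-commit guard: the locally tracked length is written back
// through the borrowed `len` exactly once, when the guard is dropped
// (including during unwinding).
struct SetLenOnDrop<'a> {
    len: &'a mut usize,
    local_len: usize,
}

impl<'a> Drop for SetLenOnDrop<'a> {
    fn drop(&mut self) {
        *self.len = self.local_len;
    }
}

fn main() {
    let mut len = 0usize;
    let result = panic::catch_unwind(AssertUnwindSafe(|| {
        let start = len;
        let mut guard = SetLenOnDrop { len: &mut len, local_len: start };
        for i in 0..5 {
            if i == 3 {
                panic!("simulated clone() panic");
            }
            guard.local_len += 1; // element `i` is now initialized
        }
    }));

    assert!(result.is_err());
    // The destructor ran during unwinding, so `len` covers exactly the three
    // elements written before the panic; nothing uninitialized is counted.
    assert_eq!(len, 3);
}
```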
This is a workaround for alias analysis issue #32155 +struct SetLenOnDrop<'a> { + len: &'a mut usize, + local_len: usize, +} + +impl<'a> SetLenOnDrop<'a> { + #[inline] + fn new(len: &'a mut usize) -> Self { + SetLenOnDrop { local_len: *len, len: len } + } + + #[inline] + fn increment_len(&mut self, increment: usize) { + self.local_len += increment; + } +} + +impl<'a> Drop for SetLenOnDrop<'a> { + #[inline] + fn drop(&mut self) { + *self.len = self.local_len; + } +} + impl Vec { /// Removes consecutive repeated elements in the vector. /// @@ -1600,11 +1642,9 @@ impl Ord for Vec { impl Drop for Vec { #[unsafe_destructor_blind_to_params] fn drop(&mut self) { - if self.buf.unsafe_no_drop_flag_needs_drop() { - unsafe { - // use drop for [T] - ptr::drop_in_place(&mut self[..]); - } + unsafe { + // use drop for [T] + ptr::drop_in_place(&mut self[..]); } // RawVec handles deallocation } @@ -1612,6 +1652,7 @@ impl Drop for Vec { #[stable(feature = "rust1", since = "1.0.0")] impl Default for Vec { + /// Creates an empty `Vec`. fn default() -> Vec { Vec::new() } @@ -1715,6 +1756,15 @@ pub struct IntoIter { end: *const T, } +#[stable(feature = "vec_intoiter_debug", since = "")] +impl fmt::Debug for IntoIter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("IntoIter") + .field(&self.as_slice()) + .finish() + } +} + impl IntoIter { /// Returns the remaining items of this iterator as a slice. /// @@ -1836,6 +1886,9 @@ impl DoubleEndedIterator for IntoIter { #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for IntoIter {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} + #[stable(feature = "vec_into_iter_clone", since = "1.8.0")] impl Clone for IntoIter { fn clone(&self) -> IntoIter { @@ -1923,3 +1976,6 @@ impl<'a, T> Drop for Drain<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Drain<'a, T> {} + +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Drain<'a, T> {} diff --git a/src/libcollections/vec_deque.rs b/src/libcollections/vec_deque.rs index aa42daec4c..452e9f7482 100644 --- a/src/libcollections/vec_deque.rs +++ b/src/libcollections/vec_deque.rs @@ -20,10 +20,11 @@ use core::cmp::Ordering; use core::fmt; -use core::iter::{repeat, FromIterator}; +use core::iter::{repeat, FromIterator, FusedIterator}; use core::mem; use core::ops::{Index, IndexMut}; use core::ptr; +use core::ptr::Shared; use core::slice; use core::hash::{Hash, Hasher}; @@ -83,6 +84,7 @@ impl Drop for VecDeque { #[stable(feature = "rust1", since = "1.0.0")] impl Default for VecDeque { + /// Creates an empty `VecDeque`. 
#[inline] fn default() -> VecDeque { VecDeque::new() @@ -724,18 +726,18 @@ impl VecDeque { /// ``` /// use std::collections::VecDeque; /// - /// let mut vector: VecDeque = VecDeque::new(); + /// let mut vector = VecDeque::new(); /// /// vector.push_back(0); /// vector.push_back(1); /// vector.push_back(2); /// - /// assert_eq!(vector.as_slices(), (&[0u32, 1, 2] as &[u32], &[] as &[u32])); + /// assert_eq!(vector.as_slices(), (&[0, 1, 2][..], &[][..])); /// /// vector.push_front(10); /// vector.push_front(9); /// - /// assert_eq!(vector.as_slices(), (&[9u32, 10] as &[u32], &[0u32, 1, 2] as &[u32])); + /// assert_eq!(vector.as_slices(), (&[9, 10][..], &[0, 1, 2][..])); /// ``` #[inline] #[stable(feature = "deque_extras_15", since = "1.5.0")] @@ -762,7 +764,7 @@ impl VecDeque { /// ``` /// use std::collections::VecDeque; /// - /// let mut vector: VecDeque = VecDeque::new(); + /// let mut vector = VecDeque::new(); /// /// vector.push_back(0); /// vector.push_back(1); @@ -772,7 +774,7 @@ impl VecDeque { /// /// vector.as_mut_slices().0[0] = 42; /// vector.as_mut_slices().1[0] = 24; - /// assert_eq!(vector.as_slices(), (&[42u32, 10] as &[u32], &[24u32, 1] as &[u32])); + /// assert_eq!(vector.as_slices(), (&[42, 10][..], &[24, 1][..])); /// ``` #[inline] #[stable(feature = "deque_extras_15", since = "1.5.0")] @@ -903,7 +905,7 @@ impl VecDeque { self.head = drain_tail; Drain { - deque: self as *mut _, + deque: unsafe { Shared::new(self as *mut _) }, after_tail: drain_head, after_head: head, iter: Iter { @@ -1890,6 +1892,10 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Iter<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Iter<'a, T> {} + + /// `VecDeque` mutable iterator. 
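The `FusedIterator` impls added for these collection iterators (here and in the other modules touched by this patch) are a marker guarantee: once `next` returns `None`, it keeps returning `None`, which lets `Iterator::fuse` skip its extra bookkeeping for such iterators. A minimal sketch of that guarantee, using only the stable `fuse` adapter:

```
fn main() {
    let v = vec![1, 2];
    let mut it = v.into_iter().fuse();

    assert_eq!(it.next(), Some(1));
    assert_eq!(it.next(), Some(2));
    assert_eq!(it.next(), None);
    // A fused iterator keeps yielding `None` after exhaustion; iterators that
    // already behave this way can advertise it by implementing the
    // `FusedIterator` marker trait instead of being wrapped in `Fuse`.
    assert_eq!(it.next(), None);
}
```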
#[stable(feature = "rust1", since = "1.0.0")] pub struct IterMut<'a, T: 'a> { @@ -1942,6 +1948,9 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for IterMut<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for IterMut<'a, T> {} + /// A by-value VecDeque iterator #[derive(Clone)] #[stable(feature = "rust1", since = "1.0.0")] @@ -1976,13 +1985,16 @@ impl DoubleEndedIterator for IntoIter { #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for IntoIter {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} + /// A draining VecDeque iterator #[stable(feature = "drain", since = "1.6.0")] pub struct Drain<'a, T: 'a> { after_tail: usize, after_head: usize, iter: Iter<'a, T>, - deque: *mut VecDeque, + deque: Shared>, } #[stable(feature = "drain", since = "1.6.0")] @@ -1995,7 +2007,7 @@ impl<'a, T: 'a> Drop for Drain<'a, T> { fn drop(&mut self) { for _ in self.by_ref() {} - let source_deque = unsafe { &mut *self.deque }; + let source_deque = unsafe { &mut **self.deque }; // T = source_deque_tail; H = source_deque_head; t = drain_tail; h = drain_head // @@ -2065,6 +2077,9 @@ impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T: 'a> FusedIterator for Drain<'a, T> {} + #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq for VecDeque { fn eq(&self, other: &VecDeque) -> bool { @@ -2318,9 +2333,6 @@ impl From> for Vec { #[cfg(test)] mod tests { - use core::iter::Iterator; - use core::option::Option::Some; - use test; use super::VecDeque; diff --git a/src/libcollectionstest/binary_heap.rs b/src/libcollectionstest/binary_heap.rs index e2a57bd8d3..faabcf4c37 100644 --- a/src/libcollectionstest/binary_heap.rs +++ b/src/libcollectionstest/binary_heap.rs @@ -139,6 +139,7 @@ fn test_push_unique() { } #[test] +#[allow(deprecated)] fn test_push_pop() { let mut heap = BinaryHeap::from(vec![5, 5, 2, 1, 3]); assert_eq!(heap.len(), 5); @@ -153,6 +154,7 @@ fn test_push_pop() { } #[test] +#[allow(deprecated)] fn test_replace() { let mut heap = BinaryHeap::from(vec![5, 5, 2, 1, 3]); assert_eq!(heap.len(), 5); @@ -212,6 +214,7 @@ fn test_empty_peek_mut() { } #[test] +#[allow(deprecated)] fn test_empty_replace() { let mut heap = BinaryHeap::new(); assert!(heap.replace(5).is_none()); diff --git a/src/libcollectionstest/btree/set.rs b/src/libcollectionstest/btree/set.rs index f7b647d777..a32e3f1a76 100644 --- a/src/libcollectionstest/btree/set.rs +++ b/src/libcollectionstest/btree/set.rs @@ -39,30 +39,8 @@ fn test_hash() { assert!(::hash(&x) == ::hash(&y)); } -struct Counter<'a, 'b> { - i: &'a mut usize, - expected: &'b [i32], -} - -impl<'a, 'b, 'c> FnMut<(&'c i32,)> for Counter<'a, 'b> { - extern "rust-call" fn call_mut(&mut self, (&x,): (&'c i32,)) -> bool { - assert_eq!(x, self.expected[*self.i]); - *self.i += 1; - true - } -} - -impl<'a, 'b, 'c> FnOnce<(&'c i32,)> for Counter<'a, 'b> { - type Output = bool; - - extern "rust-call" fn call_once(mut self, args: (&'c i32,)) -> bool { - self.call_mut(args) - } -} - fn check(a: &[i32], b: &[i32], expected: &[i32], f: F) where - // FIXME Replace Counter with `Box _>` - F: FnOnce(&BTreeSet, &BTreeSet, Counter) -> bool, + F: FnOnce(&BTreeSet, &BTreeSet, &mut FnMut(&i32) -> bool) -> bool, { let mut set_a = BTreeSet::new(); let mut set_b = 
BTreeSet::new(); @@ -71,7 +49,11 @@ fn check(a: &[i32], b: &[i32], expected: &[i32], f: F) where for y in b { assert!(set_b.insert(*y)) } let mut i = 0; - f(&set_a, &set_b, Counter { i: &mut i, expected: expected }); + f(&set_a, &set_b, &mut |&x| { + assert_eq!(x, expected[i]); + i += 1; + true + }); assert_eq!(i, expected.len()); } diff --git a/src/libcollectionstest/lib.rs b/src/libcollectionstest/lib.rs index f448fcf2db..950e6ee2e9 100644 --- a/src/libcollectionstest/lib.rs +++ b/src/libcollectionstest/lib.rs @@ -16,12 +16,12 @@ #![feature(collections)] #![feature(collections_bound)] #![feature(const_fn)] -#![feature(fn_traits)] #![feature(enumset)] #![feature(pattern)] #![feature(rand)] #![feature(step_by)] #![feature(str_escape)] +#![feature(str_replacen)] #![feature(test)] #![feature(unboxed_closures)] #![feature(unicode)] @@ -31,7 +31,8 @@ extern crate collections; extern crate test; extern crate rustc_unicode; -use std::hash::{Hash, Hasher, SipHasher}; +use std::hash::{Hash, Hasher}; +use std::collections::hash_map::DefaultHasher; #[cfg(test)] #[macro_use] mod bench; @@ -47,7 +48,7 @@ mod vec_deque; mod vec; fn hash(t: &T) -> u64 { - let mut s = SipHasher::new(); + let mut s = DefaultHasher::new(); t.hash(&mut s); s.finish() } diff --git a/src/libcollectionstest/slice.rs b/src/libcollectionstest/slice.rs index 71416f2069..5b341ab62d 100644 --- a/src/libcollectionstest/slice.rs +++ b/src/libcollectionstest/slice.rs @@ -645,6 +645,24 @@ fn test_iter_size_hints() { assert_eq!(xs.iter_mut().size_hint(), (5, Some(5))); } +#[test] +fn test_iter_as_slice() { + let xs = [1, 2, 5, 10, 11]; + let mut iter = xs.iter(); + assert_eq!(iter.as_slice(), &[1, 2, 5, 10, 11]); + iter.next(); + assert_eq!(iter.as_slice(), &[2, 5, 10, 11]); +} + +#[test] +fn test_iter_as_ref() { + let xs = [1, 2, 5, 10, 11]; + let mut iter = xs.iter(); + assert_eq!(iter.as_ref(), &[1, 2, 5, 10, 11]); + iter.next(); + assert_eq!(iter.as_ref(), &[2, 5, 10, 11]); +} + #[test] fn test_iter_clone() { let xs = [1, 2, 5]; diff --git a/src/libcollectionstest/str.rs b/src/libcollectionstest/str.rs index a61925cd3b..62e164a569 100644 --- a/src/libcollectionstest/str.rs +++ b/src/libcollectionstest/str.rs @@ -218,6 +218,20 @@ fn test_is_empty() { assert!(!"a".is_empty()); } +#[test] +fn test_replacen() { + assert_eq!("".replacen('a', "b", 5), ""); + assert_eq!("acaaa".replacen("a", "b", 3), "bcbba"); + assert_eq!("aaaa".replacen("a", "b", 0), "aaaa"); + + let test = "test"; + assert_eq!(" test test ".replacen(test, "toast", 3), " toast toast "); + assert_eq!(" test test ".replacen(test, "toast", 0), " test test "); + assert_eq!(" test test ".replacen(test, "", 5), " "); + + assert_eq!("qwer123zxc789".replacen(char::is_numeric, "", 3), "qwerzxc789"); +} + #[test] fn test_replace() { let a = "a"; diff --git a/src/libcollectionstest/vec.rs b/src/libcollectionstest/vec.rs index 537fabf8ab..ee2b898d5c 100644 --- a/src/libcollectionstest/vec.rs +++ b/src/libcollectionstest/vec.rs @@ -501,6 +501,14 @@ fn test_into_iter_as_mut_slice() { assert_eq!(into_iter.as_slice(), &['y', 'c']); } +#[test] +fn test_into_iter_debug() { + let vec = vec!['a', 'b', 'c']; + let into_iter = vec.into_iter(); + let debug = format!("{:?}", into_iter); + assert_eq!(debug, "IntoIter(['a', 'b', 'c'])"); +} + #[test] fn test_into_iter_count() { assert_eq!(vec![1, 2, 3].into_iter().count(), 3); diff --git a/src/libcollectionstest/vec_deque.rs b/src/libcollectionstest/vec_deque.rs index a02666a50c..5e8633a974 100644 --- a/src/libcollectionstest/vec_deque.rs +++ 
b/src/libcollectionstest/vec_deque.rs @@ -10,6 +10,7 @@ use std::collections::VecDeque; use std::fmt::Debug; +use std::collections::vec_deque::Drain; use test; @@ -999,3 +1000,8 @@ fn test_contains() { assert!(!v.contains(&3)); } + +#[allow(dead_code)] +fn assert_covariance() { + fn drain<'new>(d: Drain<'static, &'static str>) -> Drain<'new, &'new str> { d } +} diff --git a/src/libcompiler_builtins/Cargo.toml b/src/libcompiler_builtins/Cargo.toml new file mode 100644 index 0000000000..a52873fc32 --- /dev/null +++ b/src/libcompiler_builtins/Cargo.toml @@ -0,0 +1,15 @@ +[package] +authors = ["The Rust Project Developers"] +build = "build.rs" +name = "compiler_builtins" +version = "0.0.0" + +[lib] +name = "compiler_builtins" +path = "lib.rs" + +[dependencies] +core = { path = "../libcore" } + +[build-dependencies] +gcc = "0.3.27" diff --git a/src/libcompiler_builtins/build.rs b/src/libcompiler_builtins/build.rs new file mode 100644 index 0000000000..66c683333b --- /dev/null +++ b/src/libcompiler_builtins/build.rs @@ -0,0 +1,404 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Compiles the `compiler-rt` library, or at least the builtins part of it. +//! +//! Note that while compiler-rt has a build system associated with it, we +//! specifically don't use it here. The compiler-rt build system, written in +//! CMake, is actually *very* difficult to work with in terms of getting it to +//! compile on all the relevant platforms we want it to compile on. In the end +//! it became so much pain to work with local patches, work around the oddities +//! of the build system, etc, that we're just building everything by hand now. +//! +//! In general compiler-rt is just a bunch of intrinsics that are in practice +//! *very* stable. We just need to make sure that all the relevant functions and +//! such are compiled somewhere and placed in an object file somewhere. +//! Eventually, these should all be written in Rust! +//! +//! So below you'll find a listing of every single file in the compiler-rt repo +//! that we're compiling. We just reach in and compile with the `gcc` crate +//! which should have all the relevant flags and such already configured. +//! +//! The risk here is that if we update compiler-rt we may need to compile some +//! new intrinsics, but to be honest we surely don't use all of the intrinsics +//! listed below today so the likelihood of us actually needing a new intrinsic +//! is quite low. The failure case is also just that someone reports a link +//! error (if any) and then we just add it to the list. Overall, that cost is +//! far far less than working with compiler-rt's build system over time. + +extern crate gcc; + +use std::collections::BTreeMap; +use std::env; +use std::path::Path; + +struct Sources { + // SYMBOL -> PATH TO SOURCE + map: BTreeMap<&'static str, &'static str>, +} + +impl Sources { + fn new() -> Sources { + Sources { map: BTreeMap::new() } + } + + fn extend(&mut self, sources: &[&'static str]) { + // NOTE Some intrinsics have both a generic implementation (e.g. + // `floatdidf.c`) and an arch optimized implementation + // (`x86_64/floatdidf.c`). In those cases, we keep the arch optimized + // implementation and discard the generic implementation. 
If we don't + // and keep both implementations, the linker will yell at us about + // duplicate symbols! + for &src in sources { + let symbol = Path::new(src).file_stem().unwrap().to_str().unwrap(); + if src.contains("/") { + // Arch-optimized implementation (preferred) + self.map.insert(symbol, src); + } else { + // Generic implementation + if !self.map.contains_key(symbol) { + self.map.insert(symbol, src); + } + } + } + } +} + +fn main() { + let target = env::var("TARGET").expect("TARGET was not set"); + let cfg = &mut gcc::Config::new(); + + if target.contains("msvc") { + // Don't pull in extra libraries on MSVC + cfg.flag("/Zl"); + + // Emulate C99 and C++11's __func__ for MSVC prior to 2013 CTP + cfg.define("__func__", Some("__FUNCTION__")); + } else { + // Turn off various features of gcc and such, mostly copying + // compiler-rt's build system already + cfg.flag("-fno-builtin"); + cfg.flag("-fvisibility=hidden"); + cfg.flag("-fomit-frame-pointer"); + cfg.flag("-ffreestanding"); + } + + let mut sources = Sources::new(); + sources.extend(&["absvdi2.c", + "absvsi2.c", + "adddf3.c", + "addsf3.c", + "addvdi3.c", + "addvsi3.c", + "apple_versioning.c", + "ashldi3.c", + "ashrdi3.c", + "clear_cache.c", + "clzdi2.c", + "clzsi2.c", + "cmpdi2.c", + "comparedf2.c", + "comparesf2.c", + "ctzdi2.c", + "ctzsi2.c", + "divdc3.c", + "divdf3.c", + "divdi3.c", + "divmoddi4.c", + "divmodsi4.c", + "divsc3.c", + "divsf3.c", + "divsi3.c", + "divxc3.c", + "extendsfdf2.c", + "extendhfsf2.c", + "ffsdi2.c", + "fixdfdi.c", + "fixdfsi.c", + "fixsfdi.c", + "fixsfsi.c", + "fixunsdfdi.c", + "fixunsdfsi.c", + "fixunssfdi.c", + "fixunssfsi.c", + "fixunsxfdi.c", + "fixunsxfsi.c", + "fixxfdi.c", + "floatdidf.c", + "floatdisf.c", + "floatdixf.c", + "floatsidf.c", + "floatsisf.c", + "floatundidf.c", + "floatundisf.c", + "floatundixf.c", + "floatunsidf.c", + "floatunsisf.c", + "int_util.c", + "lshrdi3.c", + "moddi3.c", + "modsi3.c", + "muldc3.c", + "muldf3.c", + "muldi3.c", + "mulodi4.c", + "mulosi4.c", + "muloti4.c", + "mulsc3.c", + "mulsf3.c", + "mulvdi3.c", + "mulvsi3.c", + "mulxc3.c", + "negdf2.c", + "negdi2.c", + "negsf2.c", + "negvdi2.c", + "negvsi2.c", + "paritydi2.c", + "paritysi2.c", + "popcountdi2.c", + "popcountsi2.c", + "powidf2.c", + "powisf2.c", + "powixf2.c", + "subdf3.c", + "subsf3.c", + "subvdi3.c", + "subvsi3.c", + "truncdfhf2.c", + "truncdfsf2.c", + "truncsfhf2.c", + "ucmpdi2.c", + "udivdi3.c", + "udivmoddi4.c", + "udivmodsi4.c", + "udivsi3.c", + "umoddi3.c", + "umodsi3.c"]); + + if !target.contains("ios") { + sources.extend(&["absvti2.c", + "addtf3.c", + "addvti3.c", + "ashlti3.c", + "ashrti3.c", + "clzti2.c", + "cmpti2.c", + "ctzti2.c", + "divtf3.c", + "divti3.c", + "ffsti2.c", + "fixdfti.c", + "fixsfti.c", + "fixunsdfti.c", + "fixunssfti.c", + "fixunsxfti.c", + "fixxfti.c", + "floattidf.c", + "floattisf.c", + "floattixf.c", + "floatuntidf.c", + "floatuntisf.c", + "floatuntixf.c", + "lshrti3.c", + "modti3.c", + "multf3.c", + "multi3.c", + "mulvti3.c", + "negti2.c", + "negvti2.c", + "parityti2.c", + "popcountti2.c", + "powitf2.c", + "subtf3.c", + "subvti3.c", + "trampoline_setup.c", + "ucmpti2.c", + "udivmodti4.c", + "udivti3.c", + "umodti3.c"]); + } + + if target.contains("apple") { + sources.extend(&["atomic_flag_clear.c", + "atomic_flag_clear_explicit.c", + "atomic_flag_test_and_set.c", + "atomic_flag_test_and_set_explicit.c", + "atomic_signal_fence.c", + "atomic_thread_fence.c"]); + } + + if !target.contains("windows") { + sources.extend(&["emutls.c"]); + } + + if target.contains("msvc") { + if 
target.contains("x86_64") { + sources.extend(&["x86_64/floatdidf.c", "x86_64/floatdisf.c", "x86_64/floatdixf.c"]); + } + } else { + if !target.contains("freebsd") { + sources.extend(&["gcc_personality_v0.c"]); + } + + if target.contains("x86_64") { + sources.extend(&["x86_64/chkstk.S", + "x86_64/chkstk2.S", + "x86_64/floatdidf.c", + "x86_64/floatdisf.c", + "x86_64/floatdixf.c", + "x86_64/floatundidf.S", + "x86_64/floatundisf.S", + "x86_64/floatundixf.S"]); + } + + if target.contains("i386") || target.contains("i586") || target.contains("i686") { + sources.extend(&["i386/ashldi3.S", + "i386/ashrdi3.S", + "i386/chkstk.S", + "i386/chkstk2.S", + "i386/divdi3.S", + "i386/floatdidf.S", + "i386/floatdisf.S", + "i386/floatdixf.S", + "i386/floatundidf.S", + "i386/floatundisf.S", + "i386/floatundixf.S", + "i386/lshrdi3.S", + "i386/moddi3.S", + "i386/muldi3.S", + "i386/udivdi3.S", + "i386/umoddi3.S"]); + } + } + + if target.contains("arm") && !target.contains("ios") { + sources.extend(&["arm/aeabi_cdcmp.S", + "arm/aeabi_cdcmpeq_check_nan.c", + "arm/aeabi_cfcmp.S", + "arm/aeabi_cfcmpeq_check_nan.c", + "arm/aeabi_dcmp.S", + "arm/aeabi_div0.c", + "arm/aeabi_drsub.c", + "arm/aeabi_fcmp.S", + "arm/aeabi_frsub.c", + "arm/aeabi_idivmod.S", + "arm/aeabi_ldivmod.S", + "arm/aeabi_memcmp.S", + "arm/aeabi_memcpy.S", + "arm/aeabi_memmove.S", + "arm/aeabi_memset.S", + "arm/aeabi_uidivmod.S", + "arm/aeabi_uldivmod.S", + "arm/bswapdi2.S", + "arm/bswapsi2.S", + "arm/clzdi2.S", + "arm/clzsi2.S", + "arm/comparesf2.S", + "arm/divmodsi4.S", + "arm/divsi3.S", + "arm/modsi3.S", + "arm/switch16.S", + "arm/switch32.S", + "arm/switch8.S", + "arm/switchu8.S", + "arm/sync_synchronize.S", + "arm/udivmodsi4.S", + "arm/udivsi3.S", + "arm/umodsi3.S"]); + } + + if target.contains("armv7") { + sources.extend(&["arm/sync_fetch_and_add_4.S", + "arm/sync_fetch_and_add_8.S", + "arm/sync_fetch_and_and_4.S", + "arm/sync_fetch_and_and_8.S", + "arm/sync_fetch_and_max_4.S", + "arm/sync_fetch_and_max_8.S", + "arm/sync_fetch_and_min_4.S", + "arm/sync_fetch_and_min_8.S", + "arm/sync_fetch_and_nand_4.S", + "arm/sync_fetch_and_nand_8.S", + "arm/sync_fetch_and_or_4.S", + "arm/sync_fetch_and_or_8.S", + "arm/sync_fetch_and_sub_4.S", + "arm/sync_fetch_and_sub_8.S", + "arm/sync_fetch_and_umax_4.S", + "arm/sync_fetch_and_umax_8.S", + "arm/sync_fetch_and_umin_4.S", + "arm/sync_fetch_and_umin_8.S", + "arm/sync_fetch_and_xor_4.S", + "arm/sync_fetch_and_xor_8.S"]); + } + + if target.contains("eabihf") { + sources.extend(&["arm/adddf3vfp.S", + "arm/addsf3vfp.S", + "arm/divdf3vfp.S", + "arm/divsf3vfp.S", + "arm/eqdf2vfp.S", + "arm/eqsf2vfp.S", + "arm/extendsfdf2vfp.S", + "arm/fixdfsivfp.S", + "arm/fixsfsivfp.S", + "arm/fixunsdfsivfp.S", + "arm/fixunssfsivfp.S", + "arm/floatsidfvfp.S", + "arm/floatsisfvfp.S", + "arm/floatunssidfvfp.S", + "arm/floatunssisfvfp.S", + "arm/gedf2vfp.S", + "arm/gesf2vfp.S", + "arm/gtdf2vfp.S", + "arm/gtsf2vfp.S", + "arm/ledf2vfp.S", + "arm/lesf2vfp.S", + "arm/ltdf2vfp.S", + "arm/ltsf2vfp.S", + "arm/muldf3vfp.S", + "arm/mulsf3vfp.S", + "arm/negdf2vfp.S", + "arm/negsf2vfp.S", + "arm/nedf2vfp.S", + "arm/nesf2vfp.S", + "arm/restore_vfp_d8_d15_regs.S", + "arm/save_vfp_d8_d15_regs.S", + "arm/subdf3vfp.S", + "arm/subsf3vfp.S", + "arm/truncdfsf2vfp.S", + "arm/unorddf2vfp.S", + "arm/unordsf2vfp.S"]); + } + + if target.contains("aarch64") { + sources.extend(&["comparetf2.c", + "extenddftf2.c", + "extendsftf2.c", + "fixtfdi.c", + "fixtfsi.c", + "fixtfti.c", + "fixunstfdi.c", + "fixunstfsi.c", + "fixunstfti.c", + "floatditf.c", + "floatsitf.c", 
+ "floatunditf.c", + "floatunsitf.c", + "multc3.c", + "trunctfdf2.c", + "trunctfsf2.c"]); + } + + for src in sources.map.values() { + cfg.file(Path::new("../compiler-rt/lib/builtins").join(src)); + } + + cfg.compile("libcompiler-rt.a"); +} diff --git a/src/libcompiler_builtins/lib.rs b/src/libcompiler_builtins/lib.rs new file mode 100644 index 0000000000..fbcf5204d2 --- /dev/null +++ b/src/libcompiler_builtins/lib.rs @@ -0,0 +1,19 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![cfg_attr(not(stage0), feature(compiler_builtins))] +#![no_std] +#![cfg_attr(not(stage0), compiler_builtins)] +#![unstable(feature = "compiler_builtins_lib", + reason = "internal implementation detail of rustc right now", + issue = "0")] +#![crate_name = "compiler_builtins"] +#![crate_type = "rlib"] +#![feature(staged_api)] diff --git a/src/libcore/any.rs b/src/libcore/any.rs index a452be2565..f7edcb998a 100644 --- a/src/libcore/any.rs +++ b/src/libcore/any.rs @@ -72,12 +72,7 @@ #![stable(feature = "rust1", since = "1.0.0")] use fmt; -use marker::Send; -use mem::transmute; -use option::Option::{self, Some, None}; -use raw::TraitObject; use intrinsics; -use marker::{Reflect, Sized}; /////////////////////////////////////////////////////////////////////////////// // Any trait @@ -90,7 +85,7 @@ use marker::{Reflect, Sized}; /// /// [mod]: index.html #[stable(feature = "rust1", since = "1.0.0")] -pub trait Any: Reflect + 'static { +pub trait Any: 'static { /// Gets the `TypeId` of `self`. 
/// /// # Examples @@ -116,7 +111,7 @@ pub trait Any: Reflect + 'static { } #[stable(feature = "rust1", since = "1.0.0")] -impl Any for T { +impl Any for T { fn get_type_id(&self) -> TypeId { TypeId::of::() } } @@ -201,11 +196,7 @@ impl Any { pub fn downcast_ref(&self) -> Option<&T> { if self.is::() { unsafe { - // Get the raw representation of the trait object - let to: TraitObject = transmute(self); - - // Extract the data pointer - Some(&*(to.data as *const T)) + Some(&*(self as *const Any as *const T)) } } else { None @@ -242,11 +233,7 @@ impl Any { pub fn downcast_mut(&mut self) -> Option<&mut T> { if self.is::() { unsafe { - // Get the raw representation of the trait object - let to: TraitObject = transmute(self); - - // Extract the data pointer - Some(&mut *(to.data as *const T as *mut T)) + Some(&mut *(self as *mut Any as *mut T)) } } else { None @@ -378,7 +365,7 @@ impl TypeId { /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn of() -> TypeId { + pub fn of() -> TypeId { TypeId { t: unsafe { intrinsics::type_id::() }, } diff --git a/src/libcore/array.rs b/src/libcore/array.rs index 45fc5ff800..9866a39619 100644 --- a/src/libcore/array.rs +++ b/src/libcore/array.rs @@ -20,16 +20,11 @@ issue = "27778")] use borrow::{Borrow, BorrowMut}; -use clone::Clone; -use cmp::{PartialEq, Eq, PartialOrd, Ord, Ordering}; -use convert::{AsRef, AsMut}; -use default::Default; +use cmp::Ordering; use fmt; use hash::{Hash, self}; -use iter::IntoIterator; -use marker::{Copy, Sized, Unsize}; -use option::Option; -use slice::{Iter, IterMut, SliceExt}; +use marker::Unsize; +use slice::{Iter, IterMut}; /// Utility trait implemented only on arrays of fixed size /// diff --git a/src/libcore/borrow.rs b/src/libcore/borrow.rs index 79330d3a61..3d223465c8 100644 --- a/src/libcore/borrow.rs +++ b/src/libcore/borrow.rs @@ -12,8 +12,6 @@ #![stable(feature = "rust1", since = "1.0.0")] -use marker::Sized; - /// A trait for borrowing data. /// /// In general, there may be several ways to "borrow" a piece of data. The diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index a388012e1d..64a7a8c5ef 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -119,42 +119,64 @@ //! `Cell`. //! //! ``` +//! #![feature(core_intrinsics)] +//! #![feature(shared)] //! use std::cell::Cell; +//! use std::ptr::Shared; +//! use std::intrinsics::abort; +//! use std::intrinsics::assume; //! -//! struct Rc { -//! ptr: *mut RcBox +//! struct Rc { +//! ptr: Shared> //! } //! -//! struct RcBox { -//! # #[allow(dead_code)] +//! struct RcBox { +//! strong: Cell, +//! refcount: Cell, //! value: T, -//! refcount: Cell //! } //! -//! impl Clone for Rc { +//! impl Clone for Rc { //! fn clone(&self) -> Rc { -//! unsafe { -//! (*self.ptr).refcount.set((*self.ptr).refcount.get() + 1); -//! Rc { ptr: self.ptr } -//! } +//! self.inc_strong(); +//! Rc { ptr: self.ptr } +//! } +//! } +//! +//! trait RcBoxPtr { +//! +//! fn inner(&self) -> &RcBox; +//! +//! fn strong(&self) -> usize { +//! self.inner().strong.get() +//! } +//! +//! fn inc_strong(&self) { +//! self.inner() +//! .strong +//! .set(self.strong() +//! .checked_add(1) +//! .unwrap_or_else(|| unsafe { abort() })); //! } //! } +//! +//! impl RcBoxPtr for Rc { +//! fn inner(&self) -> &RcBox { +//! unsafe { +//! assume(!(*(&self.ptr as *const _ as *const *const ())).is_null()); +//! &(**self.ptr) +//! } +//! } +//! } //! ``` //! 
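The reworked module documentation above sketches how `Rc` keeps its reference counts in `Cell`s. As a stand-alone illustration of that interior-mutability pattern (not part of the patch; the `Counter` type is an illustrative stand-in for `RcBox`, and `expect` stands in for the unsafe `abort()` used above), the following compiles on stable Rust:

```
use std::cell::Cell;

// Illustrative stand-in for RcBox: the count lives in a Cell so it can be
// bumped through a shared reference.
struct Counter {
    strong: Cell<usize>,
}

impl Counter {
    fn inc_strong(&self) {
        // checked_add mirrors the overflow guard in the Rc sketch above.
        self.strong
            .set(self.strong.get().checked_add(1).expect("refcount overflow"));
    }
}

fn main() {
    let c = Counter { strong: Cell::new(1) };
    c.inc_strong();
    assert_eq!(c.strong.get(), 2);
}
```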
#![stable(feature = "rust1", since = "1.0.0")] -use clone::Clone; -use cmp::{PartialEq, Eq, PartialOrd, Ord, Ordering}; -use convert::From; -use default::Default; +use cmp::Ordering; use fmt::{self, Debug, Display}; -use marker::{Copy, PhantomData, Send, Sync, Sized, Unsize}; -use ops::{Deref, DerefMut, Drop, FnOnce, CoerceUnsized}; -use option::Option; -use option::Option::{None, Some}; -use result::Result; -use result::Result::{Ok, Err}; +use marker::Unsize; +use ops::{Deref, DerefMut, CoerceUnsized}; /// A mutable memory location that admits only `Copy` data. /// @@ -295,6 +317,7 @@ impl Clone for Cell { #[stable(feature = "rust1", since = "1.0.0")] impl Default for Cell { + /// Creates a `Cell`, with the `Default` value for T. #[inline] fn default() -> Cell { Cell::new(Default::default()) @@ -355,6 +378,9 @@ impl From for Cell { } } +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl, U> CoerceUnsized> for Cell {} + /// A mutable memory location with dynamically checked borrow rules /// /// See the [module-level documentation](index.html) for more. @@ -377,40 +403,40 @@ pub enum BorrowState { } /// An error returned by [`RefCell::try_borrow`](struct.RefCell.html#method.try_borrow). -#[unstable(feature = "try_borrow", issue = "35070")] -pub struct BorrowError<'a, T: 'a + ?Sized> { - marker: PhantomData<&'a RefCell>, +#[stable(feature = "try_borrow", since = "1.13.0")] +pub struct BorrowError { + _private: (), } -#[unstable(feature = "try_borrow", issue = "35070")] -impl<'a, T: ?Sized> Debug for BorrowError<'a, T> { +#[stable(feature = "try_borrow", since = "1.13.0")] +impl Debug for BorrowError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("BorrowError").finish() } } -#[unstable(feature = "try_borrow", issue = "35070")] -impl<'a, T: ?Sized> Display for BorrowError<'a, T> { +#[stable(feature = "try_borrow", since = "1.13.0")] +impl Display for BorrowError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Display::fmt("already mutably borrowed", f) } } /// An error returned by [`RefCell::try_borrow_mut`](struct.RefCell.html#method.try_borrow_mut). 
-#[unstable(feature = "try_borrow", issue = "35070")] -pub struct BorrowMutError<'a, T: 'a + ?Sized> { - marker: PhantomData<&'a RefCell>, +#[stable(feature = "try_borrow", since = "1.13.0")] +pub struct BorrowMutError { + _private: (), } -#[unstable(feature = "try_borrow", issue = "35070")] -impl<'a, T: ?Sized> Debug for BorrowMutError<'a, T> { +#[stable(feature = "try_borrow", since = "1.13.0")] +impl Debug for BorrowMutError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("BorrowMutError").finish() } } -#[unstable(feature = "try_borrow", issue = "35070")] -impl<'a, T: ?Sized> Display for BorrowMutError<'a, T> { +#[stable(feature = "try_borrow", since = "1.13.0")] +impl Display for BorrowMutError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Display::fmt("already borrowed", f) } @@ -547,8 +573,6 @@ impl RefCell { /// # Examples /// /// ``` - /// #![feature(try_borrow)] - /// /// use std::cell::RefCell; /// /// let c = RefCell::new(5); @@ -563,15 +587,15 @@ impl RefCell { /// assert!(c.try_borrow().is_ok()); /// } /// ``` - #[unstable(feature = "try_borrow", issue = "35070")] + #[stable(feature = "try_borrow", since = "1.13.0")] #[inline] - pub fn try_borrow(&self) -> Result, BorrowError> { + pub fn try_borrow(&self) -> Result, BorrowError> { match BorrowRef::new(&self.borrow) { Some(b) => Ok(Ref { value: unsafe { &*self.value.get() }, borrow: b, }), - None => Err(BorrowError { marker: PhantomData }), + None => Err(BorrowError { _private: () }), } } @@ -628,8 +652,6 @@ impl RefCell { /// # Examples /// /// ``` - /// #![feature(try_borrow)] - /// /// use std::cell::RefCell; /// /// let c = RefCell::new(5); @@ -641,15 +663,15 @@ impl RefCell { /// /// assert!(c.try_borrow_mut().is_ok()); /// ``` - #[unstable(feature = "try_borrow", issue = "35070")] + #[stable(feature = "try_borrow", since = "1.13.0")] #[inline] - pub fn try_borrow_mut(&self) -> Result, BorrowMutError> { + pub fn try_borrow_mut(&self) -> Result, BorrowMutError> { match BorrowRefMut::new(&self.borrow) { Some(b) => Ok(RefMut { value: unsafe { &mut *self.value.get() }, borrow: b, }), - None => Err(BorrowMutError { marker: PhantomData }), + None => Err(BorrowMutError { _private: () }), } } @@ -733,6 +755,7 @@ impl Clone for RefCell { #[stable(feature = "rust1", since = "1.0.0")] impl Default for RefCell { + /// Creates a `RefCell`, with the `Default` value for T. #[inline] fn default() -> RefCell { RefCell::new(Default::default()) @@ -793,6 +816,9 @@ impl From for RefCell { } } +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl, U> CoerceUnsized> for RefCell {} + struct BorrowRef<'b> { borrow: &'b Cell, } @@ -1111,6 +1137,7 @@ impl UnsafeCell { #[stable(feature = "unsafe_cell_default", since = "1.9.0")] impl Default for UnsafeCell { + /// Creates an `UnsafeCell`, with the `Default` value for T. 
fn default() -> UnsafeCell { UnsafeCell::new(Default::default()) } @@ -1122,3 +1149,13 @@ impl From for UnsafeCell { UnsafeCell::new(t) } } + +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl, U> CoerceUnsized> for UnsafeCell {} + +#[allow(unused)] +fn assert_coerce_unsized(a: UnsafeCell<&i32>, b: Cell<&i32>, c: RefCell<&i32>) { + let _: UnsafeCell<&Send> = a; + let _: Cell<&Send> = b; + let _: RefCell<&Send> = c; +} diff --git a/src/libcore/char.rs b/src/libcore/char.rs index a3440fe8aa..a21d1229d3 100644 --- a/src/libcore/char.rs +++ b/src/libcore/char.rs @@ -15,9 +15,10 @@ #![allow(non_snake_case)] #![stable(feature = "core_char", since = "1.2.0")] -use prelude::v1::*; - use char_private::is_printable; +use convert::TryFrom; +use fmt; +use iter::FusedIterator; use mem::transmute; // UTF-8 ranges and tags for encoding characters @@ -123,12 +124,7 @@ pub const MAX: char = '\u{10ffff}'; #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn from_u32(i: u32) -> Option { - // catch out-of-bounds and surrogates - if (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF) { - None - } else { - Some(unsafe { from_u32_unchecked(i) }) - } + char::try_from(i).ok() } /// Converts a `u32` to a `char`, ignoring validity. @@ -176,6 +172,66 @@ pub unsafe fn from_u32_unchecked(i: u32) -> char { transmute(i) } +#[stable(feature = "char_convert", since = "1.13.0")] +impl From for u32 { + #[inline] + fn from(c: char) -> Self { + c as u32 + } +} + +/// Maps a byte in 0x00...0xFF to a `char` whose code point has the same value, in U+0000 to U+00FF. +/// +/// Unicode is designed such that this effectively decodes bytes +/// with the character encoding that IANA calls ISO-8859-1. +/// This encoding is compatible with ASCII. +/// +/// Note that this is different from ISO/IEC 8859-1 a.k.a. ISO 8859-1 (with one less hypen), +/// which leaves some "blanks", byte values that are not assigned to any character. +/// ISO-8859-1 (the IANA one) assigns them to the C0 and C1 control codes. +/// +/// Note that this is *also* different from Windows-1252 a.k.a. code page 1252, +/// which is a superset ISO/IEC 8859-1 that assigns some (not all!) blanks +/// to punctuation and various Latin characters. +/// +/// To confuse things further, [on the Web](https://encoding.spec.whatwg.org/) +/// `ascii`, `iso-8859-1`, and `windows-1252` are all aliases +/// for a superset of Windows-1252 that fills the remaining blanks with corresponding +/// C0 and C1 control codes. +#[stable(feature = "char_convert", since = "1.13.0")] +impl From for char { + #[inline] + fn from(i: u8) -> Self { + i as char + } +} + +#[unstable(feature = "try_from", issue = "33417")] +impl TryFrom for char { + type Err = CharTryFromError; + + #[inline] + fn try_from(i: u32) -> Result { + if (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF) { + Err(CharTryFromError(())) + } else { + Ok(unsafe { from_u32_unchecked(i) }) + } + } +} + +/// The error type returned when a conversion from u32 to char fails. +#[unstable(feature = "try_from", issue = "33417")] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct CharTryFromError(()); + +#[unstable(feature = "try_from", issue = "33417")] +impl fmt::Display for CharTryFromError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + "converted integer out of range for `char`".fmt(f) + } +} + /// Converts a digit in the given radix to a `char`. /// /// A 'radix' here is sometimes also called a 'base'. 
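A quick sketch of the conversions introduced above (the `From` and `TryFrom` impls for `char`): byte-to-char follows the IANA ISO-8859-1 mapping, and `from_u32`, now routed through `TryFrom<u32>`, still rejects surrogates and out-of-range values. This is illustration code on stable Rust, not part of the patch:

```
fn main() {
    // From<u8>: byte 0xE9 maps to the code point with the same value, U+00E9.
    assert_eq!(char::from(0xE9u8), 'é');

    // From<char> for u32 recovers the code point.
    assert_eq!(u32::from('€'), 0x20AC);

    // from_u32 is now built on TryFrom<u32>: surrogates and values above
    // char::MAX are still rejected.
    assert_eq!(std::char::from_u32(0xD800), None);
    assert_eq!(std::char::from_u32(0x0061), Some('a'));
}
```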
A radix of two @@ -516,6 +572,9 @@ impl ExactSizeIterator for EscapeUnicode { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for EscapeUnicode {} + /// An iterator that yields the literal escape code of a `char`. /// /// This `struct` is created by the [`escape_default()`] method on [`char`]. See @@ -616,6 +675,9 @@ impl ExactSizeIterator for EscapeDefault { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for EscapeDefault {} + /// An iterator that yields the literal escape code of a `char`. /// /// This `struct` is created by the [`escape_debug()`] method on [`char`]. See its @@ -637,6 +699,9 @@ impl Iterator for EscapeDebug { #[unstable(feature = "char_escape_debug", issue = "35068")] impl ExactSizeIterator for EscapeDebug { } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for EscapeDebug {} + /// An iterator over `u8` entries represending the UTF-8 encoding of a `char` /// value. /// @@ -675,6 +740,9 @@ impl Iterator for EncodeUtf8 { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for EncodeUtf8 {} + /// An iterator over `u16` entries represending the UTF-16 encoding of a `char` /// value. /// @@ -714,6 +782,8 @@ impl Iterator for EncodeUtf16 { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for EncodeUtf16 {} /// An iterator over an iterator of bytes of the characters the bytes represent /// as UTF-8 @@ -730,33 +800,92 @@ pub fn decode_utf8>(i: I) -> DecodeUtf8 /// `::next` returns this for an invalid input sequence. #[unstable(feature = "decode_utf8", issue = "33906")] -#[derive(PartialEq, Debug)] +#[derive(PartialEq, Eq, Debug)] pub struct InvalidSequence(()); #[unstable(feature = "decode_utf8", issue = "33906")] impl> Iterator for DecodeUtf8 { type Item = Result; #[inline] + fn next(&mut self) -> Option> { - self.0.next().map(|b| { - if b & 0x80 == 0 { Ok(b as char) } else { - let l = (!b).leading_zeros() as usize; // number of bytes in UTF-8 representation - if l < 2 || l > 6 { return Err(InvalidSequence(())) }; - let mut x = (b as u32) & (0x7F >> l); - for _ in 0..l-1 { + self.0.next().map(|first_byte| { + // Emit InvalidSequence according to + // Unicode §5.22 Best Practice for U+FFFD Substitution + // http://www.unicode.org/versions/Unicode9.0.0/ch05.pdf#G40630 + + // Roughly: consume at least one byte, + // then validate one byte at a time and stop before the first unexpected byte + // (which might be the valid start of the next byte sequence). + + let mut code_point; + macro_rules! first_byte { + ($mask: expr) => { + code_point = u32::from(first_byte & $mask) + } + } + macro_rules! 
continuation_byte { + () => { continuation_byte!(0x80...0xBF) }; + ($range: pat) => { match self.0.peek() { - Some(&b) if b & 0xC0 == 0x80 => { + Some(&byte @ $range) => { + code_point = (code_point << 6) | u32::from(byte & 0b0011_1111); self.0.next(); - x = (x << 6) | (b as u32) & 0x3F; - }, - _ => return Err(InvalidSequence(())), + } + _ => return Err(InvalidSequence(())) } } - match from_u32(x) { - Some(x) if l == x.len_utf8() => Ok(x), - _ => Err(InvalidSequence(())), + } + + match first_byte { + 0x00...0x7F => { + first_byte!(0b1111_1111); + } + 0xC2...0xDF => { + first_byte!(0b0001_1111); + continuation_byte!(); + } + 0xE0 => { + first_byte!(0b0000_1111); + continuation_byte!(0xA0...0xBF); // 0x80...0x9F here are overlong + continuation_byte!(); + } + 0xE1...0xEC | 0xEE...0xEF => { + first_byte!(0b0000_1111); + continuation_byte!(); + continuation_byte!(); + } + 0xED => { + first_byte!(0b0000_1111); + continuation_byte!(0x80...0x9F); // 0xA0..0xBF here are surrogates + continuation_byte!(); } + 0xF0 => { + first_byte!(0b0000_0111); + continuation_byte!(0x90...0xBF); // 0x80..0x8F here are overlong + continuation_byte!(); + continuation_byte!(); + } + 0xF1...0xF3 => { + first_byte!(0b0000_0111); + continuation_byte!(); + continuation_byte!(); + continuation_byte!(); + } + 0xF4 => { + first_byte!(0b0000_0111); + continuation_byte!(0x80...0x8F); // 0x90..0xBF here are beyond char::MAX + continuation_byte!(); + continuation_byte!(); + } + _ => return Err(InvalidSequence(())) // Illegal first byte, overlong, or beyond MAX + } + unsafe { + Ok(from_u32_unchecked(code_point)) } }) } } + +#[unstable(feature = "fused", issue = "35602")] +impl> FusedIterator for DecodeUtf8 {} diff --git a/src/libcore/char_private.rs b/src/libcore/char_private.rs index 1d8f95cd4b..708e7cc15e 100644 --- a/src/libcore/char_private.rs +++ b/src/libcore/char_private.rs @@ -11,8 +11,6 @@ // NOTE: The following code was generated by "src/etc/char_private.py", // do not edit directly! -use slice::SliceExt; - fn check(x: u16, singletons: &[u16], normal: &[u16]) -> bool { for &s in singletons { if x == s { diff --git a/src/libcore/clone.rs b/src/libcore/clone.rs index e8cd36f3cd..0b800cacfc 100644 --- a/src/libcore/clone.rs +++ b/src/libcore/clone.rs @@ -14,10 +14,14 @@ //! assign them or pass them as arguments, the receiver will get a copy, //! leaving the original value in place. These types do not require //! allocation to copy and do not have finalizers (i.e. they do not -//! contain owned boxes or implement `Drop`), so the compiler considers +//! contain owned boxes or implement [`Drop`]), so the compiler considers //! them cheap and safe to copy. For other types copies must be made -//! explicitly, by convention implementing the `Clone` trait and calling -//! the `clone` method. +//! explicitly, by convention implementing the [`Clone`] trait and calling +//! the [`clone`][clone] method. +//! +//! [`Clone`]: trait.Clone.html +//! [clone]: trait.Clone.html#tymethod.clone +//! [`Drop`]: ../../std/ops/trait.Drop.html //! //! Basic usage example: //! @@ -44,26 +48,24 @@ #![stable(feature = "rust1", since = "1.0.0")] -use marker::Sized; - /// A common trait for the ability to explicitly duplicate an object. /// -/// Differs from `Copy` in that `Copy` is implicit and extremely inexpensive, while +/// Differs from [`Copy`] in that [`Copy`] is implicit and extremely inexpensive, while /// `Clone` is always explicit and may or may not be expensive. 
In order to enforce -/// these characteristics, Rust does not allow you to reimplement `Copy`, but you +/// these characteristics, Rust does not allow you to reimplement [`Copy`], but you /// may reimplement `Clone` and run arbitrary code. /// -/// Since `Clone` is more general than `Copy`, you can automatically make anything -/// `Copy` be `Clone` as well. +/// Since `Clone` is more general than [`Copy`], you can automatically make anything +/// [`Copy`] be `Clone` as well. /// /// ## Derivable /// /// This trait can be used with `#[derive]` if all fields are `Clone`. The `derive`d -/// implementation of `clone()` calls `clone()` on each field. +/// implementation of [`clone()`] calls [`clone()`] on each field. /// /// ## How can I implement `Clone`? /// -/// Types that are `Copy` should have a trivial implementation of `Clone`. More formally: +/// Types that are [`Copy`] should have a trivial implementation of `Clone`. More formally: /// if `T: Copy`, `x: T`, and `y: &T`, then `let x = y.clone();` is equivalent to `let x = *y;`. /// Manual implementations should be careful to uphold this invariant; however, unsafe code /// must not rely on it to ensure memory safety. @@ -72,6 +74,9 @@ use marker::Sized; /// library only implements `Clone` up until arrays of size 32. In this case, the implementation of /// `Clone` cannot be `derive`d, but can be implemented as: /// +/// [`Copy`]: ../../std/marker/trait.Copy.html +/// [`clone()`]: trait.Clone.html#tymethod.clone +/// /// ``` /// #[derive(Copy)] /// struct Stats { @@ -108,10 +113,23 @@ pub trait Clone : Sized { } } -// FIXME(aburka): this method is used solely by #[derive] to -// assert that every component of a type implements Clone. +// FIXME(aburka): these structs are used solely by #[derive] to +// assert that every component of a type implements Clone or Copy. // -// This should never be called by user code. +// These structs should never appear in user code. +#[doc(hidden)] +#[allow(missing_debug_implementations)] +#[unstable(feature = "derive_clone_copy", + reason = "deriving hack, should not be public", + issue = "0")] +pub struct AssertParamIsClone { _field: ::marker::PhantomData } +#[doc(hidden)] +#[allow(missing_debug_implementations)] +#[unstable(feature = "derive_clone_copy", + reason = "deriving hack, should not be public", + issue = "0")] +pub struct AssertParamIsCopy { _field: ::marker::PhantomData } +#[cfg(stage0)] #[doc(hidden)] #[inline(always)] #[unstable(feature = "derive_clone_copy", diff --git a/src/libcore/cmp.rs b/src/libcore/cmp.rs index bb7c971111..f990a27e52 100644 --- a/src/libcore/cmp.rs +++ b/src/libcore/cmp.rs @@ -34,9 +34,6 @@ use self::Ordering::*; -use marker::Sized; -use option::Option::{self, Some}; - /// Trait for equality comparisons which are [partial equivalence /// relations](http://en.wikipedia.org/wiki/Partial_equivalence_relation). /// @@ -132,7 +129,7 @@ pub trait PartialEq { /// This trait can be used with `#[derive]`. When `derive`d, because `Eq` has /// no extra methods, it is only informing the compiler that this is an /// equivalence relation rather than a partial equivalence relation. Note that -/// the `derive` strategy requires all fields are `PartialEq`, which isn't +/// the `derive` strategy requires all fields are `Eq`, which isn't /// always desired. /// /// ## How can I implement `Eq`? @@ -168,6 +165,17 @@ pub trait Eq: PartialEq { fn assert_receiver_is_total_eq(&self) {} } +// FIXME: this struct is used solely by #[derive] to +// assert that every component of a type implements Eq. 
+// +// This struct should never appear in user code. +#[doc(hidden)] +#[allow(missing_debug_implementations)] +#[unstable(feature = "derive_eq", + reason = "deriving hack, should not be public", + issue = "0")] +pub struct AssertParamIsEq { _field: ::marker::PhantomData } + /// An `Ordering` is the result of a comparison between two values. /// /// # Examples @@ -386,7 +394,7 @@ impl PartialOrd for Ordering { /// } /// ``` /// -/// You may also find it useful to use `partial_cmp()` on your type`s fields. Here +/// You may also find it useful to use `partial_cmp()` on your type's fields. Here /// is an example of `Person` types who have a floating-point `height` field that /// is the only field to be used for sorting: /// @@ -571,11 +579,7 @@ pub fn max(v1: T, v2: T) -> T { // Implementation of PartialEq, Eq, PartialOrd and Ord for primitive types mod impls { - use cmp::{PartialOrd, Ord, PartialEq, Eq, Ordering}; - use cmp::Ordering::{Less, Greater, Equal}; - use marker::Sized; - use option::Option; - use option::Option::{Some, None}; + use cmp::Ordering::{self, Less, Greater, Equal}; macro_rules! partial_eq_impl { ($($t:ty)*) => ($( @@ -699,38 +703,29 @@ mod impls { ord_impl! { char usize u8 u16 u32 u64 isize i8 i16 i32 i64 } - // Note: This macro is a temporary hack that can be remove once we are building with a compiler - // that supports `!` - macro_rules! not_stage0 { - () => { - #[unstable(feature = "never_type", issue = "35121")] - impl PartialEq for ! { - fn eq(&self, _: &!) -> bool { - *self - } - } - - #[unstable(feature = "never_type", issue = "35121")] - impl Eq for ! {} + #[unstable(feature = "never_type", issue = "35121")] + impl PartialEq for ! { + fn eq(&self, _: &!) -> bool { + *self + } + } - #[unstable(feature = "never_type", issue = "35121")] - impl PartialOrd for ! { - fn partial_cmp(&self, _: &!) -> Option { - *self - } - } + #[unstable(feature = "never_type", issue = "35121")] + impl Eq for ! {} - #[unstable(feature = "never_type", issue = "35121")] - impl Ord for ! { - fn cmp(&self, _: &!) -> Ordering { - *self - } - } + #[unstable(feature = "never_type", issue = "35121")] + impl PartialOrd for ! { + fn partial_cmp(&self, _: &!) -> Option { + *self } } - #[cfg(not(stage0))] - not_stage0!(); + #[unstable(feature = "never_type", issue = "35121")] + impl Ord for ! { + fn cmp(&self, _: &!) -> Ordering { + *self + } + } // & pointers diff --git a/src/libcore/convert.rs b/src/libcore/convert.rs index e68f973d8d..5f16a4f243 100644 --- a/src/libcore/convert.rs +++ b/src/libcore/convert.rs @@ -40,22 +40,25 @@ #![stable(feature = "rust1", since = "1.0.0")] -use marker::Sized; -use result::Result; - /// A cheap, reference-to-reference conversion. /// -/// `AsRef` is very similar to, but different than, `Borrow`. See +/// `AsRef` is very similar to, but different than, [`Borrow`]. See /// [the book][book] for more. /// /// [book]: ../../book/borrow-and-asref.html +/// [`Borrow`]: ../../std/borrow/trait.Borrow.html /// /// **Note: this trait must not fail**. If the conversion can fail, use a dedicated method which -/// returns an `Option` or a `Result`. +/// returns an [`Option`] or a [`Result`]. 
+/// +/// [`Option`]: ../../std/option/enum.Option.html +/// [`Result`]: ../../std/result/enum.Result.html /// /// # Examples /// -/// Both `String` and `&str` implement `AsRef`: +/// Both [`String`] and `&str` implement `AsRef`: +/// +/// [`String`]: ../../std/string/struct.String.html /// /// ``` /// fn is_hello>(s: T) { @@ -84,7 +87,26 @@ pub trait AsRef { /// A cheap, mutable reference-to-mutable reference conversion. /// /// **Note: this trait must not fail**. If the conversion can fail, use a dedicated method which -/// returns an `Option` or a `Result`. +/// returns an [`Option`] or a [`Result`]. +/// +/// [`Option`]: ../../std/option/enum.Option.html +/// [`Result`]: ../../std/result/enum.Result.html +/// +/// # Examples +/// +/// [`Box`] implements `AsMut`: +/// +/// [`Box`]: ../../std/boxed/struct.Box.html +/// +/// ``` +/// fn add_one>(num: &mut T) { +/// *num.as_mut() += 1; +/// } +/// +/// let mut boxed_num = Box::new(0); +/// add_one(&mut boxed_num); +/// assert_eq!(*boxed_num, 1); +/// ``` /// /// # Generic Impls /// @@ -100,16 +122,16 @@ pub trait AsMut { /// A conversion that consumes `self`, which may or may not be expensive. /// -/// **Note: this trait must not fail**. If the conversion can fail, use `TryInto` or a dedicated -/// method which returns an `Option` or a `Result`. +/// **Note: this trait must not fail**. If the conversion can fail, use [`TryInto`] or a dedicated +/// method which returns an [`Option`] or a [`Result`]. /// /// Library authors should not directly implement this trait, but should prefer implementing -/// the `From` trait, which offers greater flexibility and provides an equivalent `Into` +/// the [`From`][From] trait, which offers greater flexibility and provides an equivalent `Into` /// implementation for free, thanks to a blanket implementation in the standard library. /// /// # Examples /// -/// `String` implements `Into>`: +/// [`String`] implements `Into>`: /// /// ``` /// fn is_hello>>(s: T) { @@ -123,9 +145,15 @@ pub trait AsMut { /// /// # Generic Impls /// -/// - `From for U` implies `Into for T` -/// - `into()` is reflexive, which means that `Into for T` is implemented +/// - `[From][From] for U` implies `Into for T` +/// - [`into()`] is reflexive, which means that `Into for T` is implemented /// +/// [`TryInto`]: trait.TryInto.html +/// [`Option`]: ../../std/option/enum.Option.html +/// [`Result`]: ../../std/result/enum.Result.html +/// [`String`]: ../../std/string/struct.String.html +/// [From]: trait.From.html +/// [`into()`]: trait.Into.html#tymethod.into #[stable(feature = "rust1", since = "1.0.0")] pub trait Into: Sized { /// Performs the conversion. @@ -135,12 +163,12 @@ pub trait Into: Sized { /// Construct `Self` via a conversion. /// -/// **Note: this trait must not fail**. If the conversion can fail, use `TryFrom` or a dedicated -/// method which returns an `Option` or a `Result`. +/// **Note: this trait must not fail**. If the conversion can fail, use [`TryFrom`] or a dedicated +/// method which returns an [`Option`] or a [`Result`]. 
/// /// # Examples /// -/// `String` implements `From<&str>`: +/// [`String`] implements `From<&str>`: /// /// ``` /// let string = "hello".to_string(); @@ -150,9 +178,15 @@ pub trait Into: Sized { /// ``` /// # Generic impls /// -/// - `From for U` implies `Into for T` -/// - `from()` is reflexive, which means that `From for T` is implemented +/// - `From for U` implies `[Into] for T` +/// - [`from()`] is reflexive, which means that `From for T` is implemented /// +/// [`TryFrom`]: trait.TryFrom.html +/// [`Option`]: ../../std/option/enum.Option.html +/// [`Result`]: ../../std/result/enum.Result.html +/// [`String`]: ../../std/string/struct.String.html +/// [Into]: trait.Into.html +/// [`from()`]: trait.From.html#tymethod.from #[stable(feature = "rust1", since = "1.0.0")] pub trait From: Sized { /// Performs the conversion. @@ -163,8 +197,10 @@ pub trait From: Sized { /// An attempted conversion that consumes `self`, which may or may not be expensive. /// /// Library authors should not directly implement this trait, but should prefer implementing -/// the `TryFrom` trait, which offers greater flexibility and provides an equivalent `TryInto` +/// the [`TryFrom`] trait, which offers greater flexibility and provides an equivalent `TryInto` /// implementation for free, thanks to a blanket implementation in the standard library. +/// +/// [`TryFrom`]: trait.TryFrom.html #[unstable(feature = "try_from", issue = "33417")] pub trait TryInto: Sized { /// The type returned in the event of a conversion error. diff --git a/src/libcore/default.rs b/src/libcore/default.rs index 485ddae07f..85e4b2a006 100644 --- a/src/libcore/default.rs +++ b/src/libcore/default.rs @@ -12,8 +12,6 @@ #![stable(feature = "rust1", since = "1.0.0")] -use marker::Sized; - /// A trait for giving a type a useful default value. /// /// Sometimes, you want to fall back to some kind of default value, and @@ -38,7 +36,6 @@ use marker::Sized; /// bar: f32, /// } /// -/// /// fn main() { /// let options: SomeOptions = Default::default(); /// } diff --git a/src/libcore/fmt/builders.rs b/src/libcore/fmt/builders.rs index 6cac80ab62..102e3c0bd7 100644 --- a/src/libcore/fmt/builders.rs +++ b/src/libcore/fmt/builders.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use prelude::v1::*; use fmt::{self, FlagV1}; struct PadAdapter<'a, 'b: 'a> { diff --git a/src/libcore/fmt/mod.rs b/src/libcore/fmt/mod.rs index dbd715c722..8342d663cd 100644 --- a/src/libcore/fmt/mod.rs +++ b/src/libcore/fmt/mod.rs @@ -12,8 +12,6 @@ #![stable(feature = "rust1", since = "1.0.0")] -use prelude::v1::*; - use cell::{UnsafeCell, Cell, RefCell, Ref, RefMut, BorrowState}; use marker::PhantomData; use mem; @@ -274,10 +272,14 @@ impl<'a> Arguments<'a> { /// safely be done so, so no constructors are given and the fields are private /// to prevent modification. /// -/// The `format_args!` macro will safely create an instance of this structure +/// The [`format_args!`] macro will safely create an instance of this structure /// and pass it to a function or closure, passed as the first argument. The -/// macro validates the format string at compile-time so usage of the `write` -/// and `format` functions can be safely performed. +/// macro validates the format string at compile-time so usage of the [`write`] +/// and [`format`] functions can be safely performed. 
+/// +/// [`format_args!`]: ../../std/macro.format_args.html +/// [`format`]: ../../std/fmt/fn.format.html +/// [`write`]: ../../std/fmt/fn.write.html #[stable(feature = "rust1", since = "1.0.0")] #[derive(Copy, Clone)] pub struct Arguments<'a> { @@ -905,8 +907,6 @@ impl<'a> Formatter<'a> { prefix: &str, buf: &str) -> Result { - use char::CharExt; - let mut width = buf.len(); let mut sign = None; @@ -1020,7 +1020,6 @@ impl<'a> Formatter<'a> { f: F) -> Result where F: FnOnce(&mut Formatter) -> Result, { - use char::CharExt; let align = match self.align { rt::v1::Alignment::Unknown => default, _ => self.align @@ -1363,28 +1362,19 @@ macro_rules! fmt_refs { fmt_refs! { Debug, Display, Octal, Binary, LowerHex, UpperHex, LowerExp, UpperExp } -// Note: This macro is a temporary hack that can be remove once we are building with a compiler -// that supports `!` -macro_rules! not_stage0 { - () => { - #[unstable(feature = "never_type", issue = "35121")] - impl Debug for ! { - fn fmt(&self, _: &mut Formatter) -> Result { - *self - } - } - - #[unstable(feature = "never_type", issue = "35121")] - impl Display for ! { - fn fmt(&self, _: &mut Formatter) -> Result { - *self - } - } +#[unstable(feature = "never_type", issue = "35121")] +impl Debug for ! { + fn fmt(&self, _: &mut Formatter) -> Result { + *self } } -#[cfg(not(stage0))] -not_stage0!(); +#[unstable(feature = "never_type", issue = "35121")] +impl Display for ! { + fn fmt(&self, _: &mut Formatter) -> Result { + *self + } +} #[stable(feature = "rust1", since = "1.0.0")] impl Debug for bool { diff --git a/src/libcore/fmt/num.rs b/src/libcore/fmt/num.rs index d55e0317a9..0145897d8f 100644 --- a/src/libcore/fmt/num.rs +++ b/src/libcore/fmt/num.rs @@ -14,8 +14,6 @@ // FIXME: #6220 Implement floating point formatting -use prelude::v1::*; - use fmt; use num::Zero; use ops::{Div, Rem, Sub}; diff --git a/src/libcore/fmt/rt/v1.rs b/src/libcore/fmt/rt/v1.rs index 6b31e04062..ec7add9c37 100644 --- a/src/libcore/fmt/rt/v1.rs +++ b/src/libcore/fmt/rt/v1.rs @@ -31,7 +31,7 @@ pub struct FormatSpec { } /// Possible alignments that can be requested as part of a formatting directive. -#[derive(Copy, Clone, PartialEq)] +#[derive(Copy, Clone, PartialEq, Eq)] pub enum Alignment { /// Indication that contents should be left-aligned. Left, diff --git a/src/libcore/hash/mod.rs b/src/libcore/hash/mod.rs index 27fdbd3830..6a60cfcc12 100644 --- a/src/libcore/hash/mod.rs +++ b/src/libcore/hash/mod.rs @@ -71,16 +71,16 @@ #![stable(feature = "rust1", since = "1.0.0")] -use prelude::v1::*; - use fmt; use marker; use mem; #[stable(feature = "rust1", since = "1.0.0")] +#[allow(deprecated)] pub use self::sip::SipHasher; #[unstable(feature = "sip_hash_13", issue = "29754")] +#[allow(deprecated)] pub use self::sip::{SipHasher13, SipHasher24}; mod sip; @@ -288,8 +288,6 @@ impl Default for BuildHasherDefault { ////////////////////////////////////////////////////////////////////////////// mod impls { - use prelude::v1::*; - use mem; use slice; use super::*; diff --git a/src/libcore/hash/sip.rs b/src/libcore/hash/sip.rs index 4a806a3c98..bf138a45de 100644 --- a/src/libcore/hash/sip.rs +++ b/src/libcore/hash/sip.rs @@ -10,7 +10,7 @@ //! An implementation of SipHash. 
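The hashing changes that follow (and the earlier libcollectionstest update) deprecate direct use of `SipHasher` in favour of the now-stable `std::collections::hash_map::DefaultHasher`. A minimal sketch of the replacement pattern; the `hash_one` helper name is illustrative:

```
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Illustrative helper: hash any Hash value with the default (unspecified)
// algorithm instead of naming SipHasher directly.
fn hash_one<T: Hash>(t: &T) -> u64 {
    let mut s = DefaultHasher::new();
    t.hash(&mut s);
    s.finish()
}

fn main() {
    // Equal values hash equally within one program run; the algorithm itself
    // is an implementation detail and may change between releases.
    assert_eq!(hash_one(&"rust"), hash_one(&"rust"));
}
```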
-use prelude::v1::*; +#![allow(deprecated)] use marker::PhantomData; use ptr; @@ -19,6 +19,7 @@ use ptr; /// /// See: https://131002.net/siphash/ #[unstable(feature = "sip_hash_13", issue = "34767")] +#[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")] #[derive(Debug, Clone, Default)] pub struct SipHasher13 { hasher: Hasher, @@ -28,6 +29,7 @@ pub struct SipHasher13 { /// /// See: https://131002.net/siphash/ #[unstable(feature = "sip_hash_13", issue = "34767")] +#[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")] #[derive(Debug, Clone, Default)] pub struct SipHasher24 { hasher: Hasher, @@ -49,6 +51,7 @@ pub struct SipHasher24 { /// it is not intended for cryptographic purposes. As such, all /// cryptographic uses of this implementation are _strongly discouraged_. #[stable(feature = "rust1", since = "1.0.0")] +#[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")] #[derive(Debug, Clone, Default)] pub struct SipHasher(SipHasher24); @@ -138,6 +141,7 @@ impl SipHasher { /// Creates a new `SipHasher` with the two initial keys set to 0. #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")] pub fn new() -> SipHasher { SipHasher::new_with_keys(0, 0) } @@ -145,16 +149,17 @@ impl SipHasher { /// Creates a `SipHasher` that is keyed off the provided keys. #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")] pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher { SipHasher(SipHasher24::new_with_keys(key0, key1)) } } - impl SipHasher13 { /// Creates a new `SipHasher13` with the two initial keys set to 0. #[inline] #[unstable(feature = "sip_hash_13", issue = "34767")] + #[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")] pub fn new() -> SipHasher13 { SipHasher13::new_with_keys(0, 0) } @@ -162,6 +167,7 @@ impl SipHasher13 { /// Creates a `SipHasher13` that is keyed off the provided keys. #[inline] #[unstable(feature = "sip_hash_13", issue = "34767")] + #[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")] pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher13 { SipHasher13 { hasher: Hasher::new_with_keys(key0, key1) @@ -173,6 +179,7 @@ impl SipHasher24 { /// Creates a new `SipHasher24` with the two initial keys set to 0. #[inline] #[unstable(feature = "sip_hash_13", issue = "34767")] + #[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")] pub fn new() -> SipHasher24 { SipHasher24::new_with_keys(0, 0) } @@ -180,6 +187,7 @@ impl SipHasher24 { /// Creates a `SipHasher24` that is keyed off the provided keys. #[inline] #[unstable(feature = "sip_hash_13", issue = "34767")] + #[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")] pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher24 { SipHasher24 { hasher: Hasher::new_with_keys(key0, key1) @@ -335,6 +343,7 @@ impl Clone for Hasher { } impl Default for Hasher { + /// Creates a `Hasher` with the two initial keys set to 0. 
#[inline] fn default() -> Hasher { Hasher::new_with_keys(0, 0) diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs index c645608dda..22abe7a99b 100644 --- a/src/libcore/intrinsics.rs +++ b/src/libcore/intrinsics.rs @@ -46,8 +46,6 @@ issue = "0")] #![allow(missing_docs)] -use marker::Sized; - extern "rust-intrinsic" { // NB: These intrinsics take raw pointers because they mutate aliased @@ -196,6 +194,20 @@ extern "rust-intrinsic" { /// own, or if it does not enable any significant optimizations. pub fn assume(b: bool); + #[cfg(not(stage0))] + /// Hints to the compiler that branch condition is likely to be true. + /// Returns the value passed to it. + /// + /// Any use other than with `if` statements will probably not have an effect. + pub fn likely(b: bool) -> bool; + + #[cfg(not(stage0))] + /// Hints to the compiler that branch condition is likely to be false. + /// Returns the value passed to it. + /// + /// Any use other than with `if` statements will probably not have an effect. + pub fn unlikely(b: bool) -> bool; + /// Executes a breakpoint trap, for inspection by a debugger. pub fn breakpoint(); @@ -244,19 +256,6 @@ extern "rust-intrinsic" { /// crate it is invoked in. pub fn type_id() -> u64; - /// Creates a value initialized to so that its drop flag, - /// if any, says that it has been dropped. - /// - /// `init_dropped` is unsafe because it returns a datum with all - /// of its bytes set to the drop flag, which generally does not - /// correspond to a valid value. - /// - /// This intrinsic is likely to be deprecated in the future when - /// Rust moves to non-zeroing dynamic drop (and thus removes the - /// embedded drop flags that are being established by this - /// intrinsic). - pub fn init_dropped() -> T; - /// Creates a value initialized to zero. /// /// `init` is unsafe because it returns a zeroed-out datum, @@ -277,22 +276,25 @@ extern "rust-intrinsic" { /// Moves a value out of scope without running drop glue. pub fn forget(_: T) -> (); - /// Reinterprets the bits of a value of one type as another type; both types - /// must have the same size. Neither the original, nor the result, may be an - /// [invalid value] (../../nomicon/meet-safe-and-unsafe.html). + /// Reinterprets the bits of a value of one type as another type. + /// + /// Both types must have the same size. Neither the original, nor the result, + /// may be an [invalid value](../../nomicon/meet-safe-and-unsafe.html). /// /// `transmute` is semantically equivalent to a bitwise move of one type - /// into another. It copies the bits from the destination type into the - /// source type, then forgets the original. It's equivalent to C's `memcpy` - /// under the hood, just like `transmute_copy`. + /// into another. It copies the bits from the source value into the + /// destination value, then forgets the original. It's equivalent to C's + /// `memcpy` under the hood, just like `transmute_copy`. /// - /// `transmute` is incredibly unsafe. There are a vast number of ways to - /// cause undefined behavior with this function. `transmute` should be + /// `transmute` is **incredibly** unsafe. There are a vast number of ways to + /// cause [undefined behavior][ub] with this function. `transmute` should be /// the absolute last resort. /// /// The [nomicon](../../nomicon/transmutes.html) has additional /// documentation. /// + /// [ub]: ../../reference.html#behavior-considered-undefined + /// /// # Examples /// /// There are a few things that `transmute` is really useful for. 
@@ -307,7 +309,8 @@ extern "rust-intrinsic" { /// assert_eq!(bitpattern, 0x3F800000); /// ``` /// - /// Turning a pointer into a function pointer: + /// Turning a pointer into a function pointer. This is *not* portable to + /// machines where function pointers and data pointers have different sizes. /// /// ``` /// fn foo() -> i32 { @@ -320,8 +323,8 @@ extern "rust-intrinsic" { /// assert_eq!(function(), 0); /// ``` /// - /// Extending a lifetime, or shortening an invariant lifetime; this is - /// advanced, very unsafe rust: + /// Extending a lifetime, or shortening an invariant lifetime. This is + /// advanced, very unsafe Rust! /// /// ``` /// struct R<'a>(&'a i32); @@ -337,11 +340,9 @@ extern "rust-intrinsic" { /// /// # Alternatives /// - /// However, many uses of `transmute` can be achieved through other means. - /// `transmute` can transform any type into any other, with just the caveat - /// that they're the same size, and often interesting results occur. Below - /// are common applications of `transmute` which can be replaced with safe - /// applications of `as`: + /// Don't despair: many uses of `transmute` can be achieved through other means. + /// Below are common applications of `transmute` which can be replaced with safer + /// constructs. /// /// Turning a pointer into a `usize`: /// @@ -350,6 +351,7 @@ extern "rust-intrinsic" { /// let ptr_num_transmute = unsafe { /// std::mem::transmute::<&i32, usize>(ptr) /// }; + /// /// // Use an `as` cast instead /// let ptr_num_cast = ptr as *const i32 as usize; /// ``` @@ -361,6 +363,7 @@ extern "rust-intrinsic" { /// let ref_transmuted = unsafe { /// std::mem::transmute::<*mut i32, &mut i32>(ptr) /// }; + /// /// // Use a reborrow instead /// let ref_casted = unsafe { &mut *ptr }; /// ``` @@ -372,6 +375,7 @@ extern "rust-intrinsic" { /// let val_transmuted = unsafe { /// std::mem::transmute::<&mut i32, &mut u32>(ptr) /// }; + /// /// // Now, put together `as` and reborrowing - note the chaining of `as` /// // `as` is not transitive /// let val_casts = unsafe { &mut *(ptr as *mut i32 as *mut u32) }; @@ -383,9 +387,11 @@ extern "rust-intrinsic" { /// // this is not a good way to do this. /// let slice = unsafe { std::mem::transmute::<&str, &[u8]>("Rust") }; /// assert_eq!(slice, &[82, 117, 115, 116]); + /// /// // You could use `str::as_bytes` /// let slice = "Rust".as_bytes(); /// assert_eq!(slice, &[82, 117, 115, 116]); + /// /// // Or, just use a byte string, if you have control over the string /// // literal /// assert_eq!(b"Rust", &[82, 117, 115, 116]); @@ -396,18 +402,21 @@ extern "rust-intrinsic" { /// ``` /// let store = [0, 1, 2, 3]; /// let mut v_orig = store.iter().collect::>(); + /// /// // Using transmute: this is Undefined Behavior, and a bad idea. /// // However, it is no-copy. /// let v_transmuted = unsafe { /// std::mem::transmute::, Vec>>( /// v_orig.clone()) /// }; + /// /// // This is the suggested, safe way. - /// // It does copy the entire Vector, though, into a new array. + /// // It does copy the entire vector, though, into a new array. /// let v_collected = v_orig.clone() /// .into_iter() /// .map(|r| Some(r)) /// .collect::>>(); + /// /// // The no-copy, unsafe way, still using transmute, but not UB. /// // This is equivalent to the original, but safer, and reuses the /// // same Vec internals. 
Therefore the new inner type must have the @@ -427,6 +436,7 @@ extern "rust-intrinsic" { /// /// ``` /// use std::{slice, mem}; + /// /// // There are multiple ways to do this; and there are multiple problems /// // with the following, transmute, way. /// fn split_at_mut_transmute(slice: &mut [T], mid: usize) @@ -441,6 +451,7 @@ extern "rust-intrinsic" { /// (&mut slice[0..mid], &mut slice2[mid..len]) /// } /// } + /// /// // This gets rid of the typesafety problems; `&mut *` will *only* give /// // you an `&mut T` from an `&mut T` or `*mut T`. /// fn split_at_mut_casts(slice: &mut [T], mid: usize) @@ -454,6 +465,7 @@ extern "rust-intrinsic" { /// (&mut slice[0..mid], &mut slice2[mid..len]) /// } /// } + /// /// // This is how the standard library does it. This is the best method, if /// // you need to do something like this /// fn split_at_stdlib(slice: &mut [T], mid: usize) diff --git a/src/libcore/iter/iterator.rs b/src/libcore/iter/iterator.rs index 6b01ccacee..0e74bbe9c2 100644 --- a/src/libcore/iter/iterator.rs +++ b/src/libcore/iter/iterator.rs @@ -8,19 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use clone::Clone; -use cmp::{Ord, PartialOrd, PartialEq, Ordering}; -use default::Default; -use ops::FnMut; -use option::Option::{self, Some, None}; -use marker::Sized; +use cmp::Ordering; use super::{Chain, Cycle, Cloned, Enumerate, Filter, FilterMap, FlatMap, Fuse}; use super::{Inspect, Map, Peekable, Scan, Skip, SkipWhile, Take, TakeWhile, Rev}; use super::{Zip, Sum, Product}; -use super::ChainState; -use super::{DoubleEndedIterator, ExactSizeIterator, Extend, FromIterator}; -use super::{IntoIterator, ZipImpl}; +use super::{ChainState, FromIterator, ZipImpl}; fn _assert_is_object_safe(_: &Iterator) {} @@ -1664,6 +1657,32 @@ pub trait Iterator { .map(|(_, x)| x) } + /// Returns the element that gives the maximum value with respect to the + /// specified comparison function. + /// + /// Returns the rightmost element if the comparison determines two elements + /// to be equally maximum. + /// + /// # Examples + /// + /// ``` + /// #![feature(iter_max_by)] + /// let a = [-3_i32, 0, 1, 5, -10]; + /// assert_eq!(*a.iter().max_by(|x, y| x.cmp(y)).unwrap(), 5); + /// ``` + #[inline] + #[unstable(feature = "iter_max_by", issue="36105")] + fn max_by(self, mut compare: F) -> Option + where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering, + { + select_fold1(self, + |_| (), + // switch to y even if it is only equal, to preserve + // stability. + |_, x, _, y| Ordering::Greater != compare(x, y)) + .map(|(_, x)| x) + } + /// Returns the element that gives the minimum value from the /// specified function. /// @@ -1688,6 +1707,33 @@ pub trait Iterator { .map(|(_, x)| x) } + /// Returns the element that gives the minimum value with respect to the + /// specified comparison function. + /// + /// Returns the latest element if the comparison determines two elements + /// to be equally minimum. + /// + /// # Examples + /// + /// ``` + /// #![feature(iter_min_by)] + /// let a = [-3_i32, 0, 1, 5, -10]; + /// assert_eq!(*a.iter().min_by(|x, y| x.cmp(y)).unwrap(), -10); + /// ``` + #[inline] + #[unstable(feature = "iter_min_by", issue="36105")] + fn min_by(self, mut compare: F) -> Option + where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering, + { + select_fold1(self, + |_| (), + // switch to y even if it is strictly smaller, to + // preserve stability. 
+ |_, x, _, y| Ordering::Greater == compare(x, y)) + .map(|(_, x)| x) + } + + /// Reverses an iterator's direction. /// /// Usually, iterators iterate from left to right. After using `rev()`, @@ -1821,7 +1867,8 @@ pub trait Iterator { /// # Panics /// /// When calling `sum` and a primitive integer type is being returned, this - /// method will panic if the computation overflows. + /// method will panic if the computation overflows and debug assertions are + /// enabled. /// /// # Examples /// @@ -1848,7 +1895,8 @@ pub trait Iterator { /// # Panics /// /// When calling `product` and a primitive integer type is being returned, - /// this method will panic if the computation overflows. + /// method will panic if the computation overflows and debug assertions are + /// enabled. /// /// # Examples /// diff --git a/src/libcore/iter/mod.rs b/src/libcore/iter/mod.rs index 37baa0c404..dd57fd1b51 100644 --- a/src/libcore/iter/mod.rs +++ b/src/libcore/iter/mod.rs @@ -299,12 +299,9 @@ #![stable(feature = "rust1", since = "1.0.0")] -use clone::Clone; use cmp; use fmt; use iter_private::TrustedRandomAccess; -use ops::FnMut; -use option::Option::{self, Some, None}; use usize; #[stable(feature = "rust1", since = "1.0.0")] @@ -329,6 +326,8 @@ pub use self::sources::{Once, once}; pub use self::traits::{FromIterator, IntoIterator, DoubleEndedIterator, Extend}; #[stable(feature = "rust1", since = "1.0.0")] pub use self::traits::{ExactSizeIterator, Sum, Product}; +#[unstable(feature = "fused", issue = "35602")] +pub use self::traits::FusedIterator; mod iterator; mod range; @@ -369,6 +368,10 @@ impl DoubleEndedIterator for Rev where I: DoubleEndedIterator { impl ExactSizeIterator for Rev where I: ExactSizeIterator + DoubleEndedIterator {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Rev + where I: FusedIterator + DoubleEndedIterator {} + /// An iterator that clones the elements of an underlying iterator. /// /// This `struct` is created by the [`cloned()`] method on [`Iterator`]. See its @@ -412,6 +415,11 @@ impl<'a, I, T: 'a> ExactSizeIterator for Cloned where I: ExactSizeIterator, T: Clone {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, I, T: 'a> FusedIterator for Cloned + where I: FusedIterator, T: Clone +{} + /// An iterator that repeats endlessly. /// /// This `struct` is created by the [`cycle()`] method on [`Iterator`]. See its @@ -450,6 +458,9 @@ impl Iterator for Cycle where I: Clone + Iterator { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Cycle where I: Clone + Iterator {} + /// An iterator that strings two iterators together. /// /// This `struct` is created by the [`chain()`] method on [`Iterator`]. See its @@ -612,6 +623,13 @@ impl DoubleEndedIterator for Chain where } } +// Note: *both* must be fused to handle double-ended iterators. +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Chain + where A: FusedIterator, + B: FusedIterator, +{} + /// An iterator that iterates two other iterators simultaneously. /// /// This `struct` is created by the [`zip()`] method on [`Iterator`]. See its @@ -798,6 +816,10 @@ unsafe impl TrustedRandomAccess for Zip } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Zip + where A: FusedIterator, B: FusedIterator, {} + /// An iterator that maps the values of `iter` with `f`. /// /// This `struct` is created by the [`map()`] method on [`Iterator`]. 
See its @@ -894,6 +916,10 @@ impl DoubleEndedIterator for Map where impl ExactSizeIterator for Map where F: FnMut(I::Item) -> B {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Map + where F: FnMut(I::Item) -> B {} + /// An iterator that filters the elements of `iter` with `predicate`. /// /// This `struct` is created by the [`filter()`] method on [`Iterator`]. See its @@ -954,6 +980,10 @@ impl DoubleEndedIterator for Filter } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Filter + where P: FnMut(&I::Item) -> bool {} + /// An iterator that uses `f` to both filter and map elements from `iter`. /// /// This `struct` is created by the [`filter_map()`] method on [`Iterator`]. See its @@ -1016,6 +1046,10 @@ impl DoubleEndedIterator for FilterMap } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for FilterMap + where F: FnMut(I::Item) -> Option {} + /// An iterator that yields the current count and the element during iteration. /// /// This `struct` is created by the [`enumerate()`] method on [`Iterator`]. See its @@ -1103,6 +1137,9 @@ unsafe impl TrustedRandomAccess for Enumerate } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Enumerate where I: FusedIterator {} + /// An iterator with a `peek()` that returns an optional reference to the next /// element. /// @@ -1170,6 +1207,9 @@ impl Iterator for Peekable { #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for Peekable {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Peekable {} + impl Peekable { /// Returns a reference to the next() value without advancing the iterator. /// @@ -1271,6 +1311,10 @@ impl Iterator for SkipWhile } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for SkipWhile + where I: FusedIterator, P: FnMut(&I::Item) -> bool {} + /// An iterator that only accepts elements while `predicate` is true. /// /// This `struct` is created by the [`take_while()`] method on [`Iterator`]. See its @@ -1326,6 +1370,10 @@ impl Iterator for TakeWhile } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for TakeWhile + where I: FusedIterator, P: FnMut(&I::Item) -> bool {} + /// An iterator that skips over `n` elements of `iter`. /// /// This `struct` is created by the [`skip()`] method on [`Iterator`]. See its @@ -1417,6 +1465,9 @@ impl DoubleEndedIterator for Skip where I: DoubleEndedIterator + ExactSize } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Skip where I: FusedIterator {} + /// An iterator that only iterates over the first `n` iterations of `iter`. /// /// This `struct` is created by the [`take()`] method on [`Iterator`]. See its @@ -1478,6 +1529,8 @@ impl Iterator for Take where I: Iterator{ #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for Take where I: ExactSizeIterator {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Take where I: FusedIterator {} /// An iterator to maintain state while iterating another iterator. /// @@ -1524,6 +1577,10 @@ impl Iterator for Scan where } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Scan + where I: FusedIterator, F: FnMut(&mut St, I::Item) -> Option {} + /// An iterator that maps each element to an iterator, and yields the elements /// of the produced iterators. 
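The `Peekable` hunk above only shows the impl blocks, so as a reminder of the behaviour being described, here is a minimal sketch of `peek()`, which borrows the next element without advancing the iterator:

```rust
fn main() {
    let xs = [1, 2, 3];
    let mut iter = xs.iter().peekable();

    // `peek()` returns an optional reference to the next element without
    // consuming it ...
    assert_eq!(iter.peek(), Some(&&1));
    // ... so the following `next()` still yields that element.
    assert_eq!(iter.next(), Some(&1));

    iter.next();
    iter.next();
    // Once the underlying iterator is exhausted, `peek()` yields `None` too.
    assert_eq!(iter.peek(), None);
}
```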
/// @@ -1610,6 +1667,10 @@ impl DoubleEndedIterator for FlatMap wher } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for FlatMap + where I: FusedIterator, U: IntoIterator, F: FnMut(I::Item) -> U {} + /// An iterator that yields `None` forever after the underlying iterator /// yields `None` once. /// @@ -1626,12 +1687,15 @@ pub struct Fuse { done: bool } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Fuse where I: Iterator {} + #[stable(feature = "rust1", since = "1.0.0")] impl Iterator for Fuse where I: Iterator { type Item = ::Item; #[inline] - fn next(&mut self) -> Option<::Item> { + default fn next(&mut self) -> Option<::Item> { if self.done { None } else { @@ -1642,7 +1706,7 @@ impl Iterator for Fuse where I: Iterator { } #[inline] - fn nth(&mut self, n: usize) -> Option { + default fn nth(&mut self, n: usize) -> Option { if self.done { None } else { @@ -1653,7 +1717,7 @@ impl Iterator for Fuse where I: Iterator { } #[inline] - fn last(self) -> Option { + default fn last(self) -> Option { if self.done { None } else { @@ -1662,7 +1726,7 @@ impl Iterator for Fuse where I: Iterator { } #[inline] - fn count(self) -> usize { + default fn count(self) -> usize { if self.done { 0 } else { @@ -1671,7 +1735,7 @@ impl Iterator for Fuse where I: Iterator { } #[inline] - fn size_hint(&self) -> (usize, Option) { + default fn size_hint(&self) -> (usize, Option) { if self.done { (0, Some(0)) } else { @@ -1683,7 +1747,7 @@ impl Iterator for Fuse where I: Iterator { #[stable(feature = "rust1", since = "1.0.0")] impl DoubleEndedIterator for Fuse where I: DoubleEndedIterator { #[inline] - fn next_back(&mut self) -> Option<::Item> { + default fn next_back(&mut self) -> Option<::Item> { if self.done { None } else { @@ -1694,6 +1758,53 @@ impl DoubleEndedIterator for Fuse where I: DoubleEndedIterator { } } +unsafe impl TrustedRandomAccess for Fuse + where I: TrustedRandomAccess, +{ + unsafe fn get_unchecked(&mut self, i: usize) -> I::Item { + self.iter.get_unchecked(i) + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl Iterator for Fuse where I: FusedIterator { + #[inline] + fn next(&mut self) -> Option<::Item> { + self.iter.next() + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + self.iter.nth(n) + } + + #[inline] + fn last(self) -> Option { + self.iter.last() + } + + #[inline] + fn count(self) -> usize { + self.iter.count() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +#[unstable(feature = "fused", reason = "recently added", issue = "35602")] +impl DoubleEndedIterator for Fuse + where I: DoubleEndedIterator + FusedIterator +{ + #[inline] + fn next_back(&mut self) -> Option<::Item> { + self.iter.next_back() + } +} + + #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for Fuse where I: ExactSizeIterator {} @@ -1763,3 +1874,7 @@ impl DoubleEndedIterator for Inspect #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for Inspect where F: FnMut(&I::Item) {} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Inspect + where F: FnMut(&I::Item) {} diff --git a/src/libcore/iter/range.rs b/src/libcore/iter/range.rs index c234ef21db..66d05d81d8 100644 --- a/src/libcore/iter/range.rs +++ b/src/libcore/iter/range.rs @@ -8,15 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
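The `Fuse` changes above turn its `Iterator` methods into `default fn`s so that, when the inner iterator already implements `FusedIterator`, the wrapper simply forwards every call. A minimal sketch of what `fuse()` guards against in the general case; the `Flaky` iterator is an illustrative stand-in for any iterator that resumes after returning `None`:

```rust
// An ill-behaved iterator: it "recovers" after returning `None` once.
struct Flaky {
    state: u32,
}

impl Iterator for Flaky {
    type Item = u32;

    fn next(&mut self) -> Option<u32> {
        self.state += 1;
        match self.state {
            1 => Some(1),
            2 => None,    // looks exhausted here ...
            3 => Some(3), // ... but yields again afterwards
            _ => None,
        }
    }
}

fn main() {
    // Without `fuse()`, the iterator resumes after its first `None`.
    let mut raw = Flaky { state: 0 };
    assert_eq!(raw.next(), Some(1));
    assert_eq!(raw.next(), None);
    assert_eq!(raw.next(), Some(3));

    // `Fuse` latches on the first `None` and stays exhausted.
    let mut fused = Flaky { state: 0 }.fuse();
    assert_eq!(fused.next(), Some(1));
    assert_eq!(fused.next(), None);
    assert_eq!(fused.next(), None);
}
```

With the specialization in this hunk, an iterator that already implements `FusedIterator` pays none of the `done` bookkeeping: every call on the `Fuse` wrapper is forwarded directly.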
-use clone::Clone; -use cmp::PartialOrd; use mem; use ops::{self, Add, Sub}; -use option::Option::{self, Some, None}; -use marker::Sized; use usize; -use super::{DoubleEndedIterator, ExactSizeIterator, Iterator}; +use super::FusedIterator; /// Objects that can be stepped over in both directions. /// @@ -267,14 +263,12 @@ impl ops::RangeFrom { /// # Examples /// /// ``` - /// # #![feature(step_by)] - /// - /// for i in (0u8..).step_by(2).take(10) { - /// println!("{}", i); + /// #![feature(step_by)] + /// fn main() { + /// let result: Vec<_> = (0..).step_by(2).take(5).collect(); + /// assert_eq!(result, vec![0, 2, 4, 6, 8]); /// } /// ``` - /// - /// This prints the first ten even natural integers (0 to 18). #[unstable(feature = "step_by", reason = "recent addition", issue = "27741")] pub fn step_by(self, by: A) -> StepBy { @@ -295,21 +289,11 @@ impl ops::Range { /// /// ``` /// #![feature(step_by)] - /// - /// for i in (0..10).step_by(2) { - /// println!("{}", i); + /// fn main() { + /// let result: Vec<_> = (0..10).step_by(2).collect(); + /// assert_eq!(result, vec![0, 2, 4, 6, 8]); /// } /// ``` - /// - /// This prints: - /// - /// ```text - /// 0 - /// 2 - /// 4 - /// 6 - /// 8 - /// ``` #[unstable(feature = "step_by", reason = "recent addition", issue = "27741")] pub fn step_by(self, by: A) -> StepBy { @@ -331,20 +315,8 @@ impl ops::RangeInclusive { /// ``` /// #![feature(step_by, inclusive_range_syntax)] /// - /// for i in (0...10).step_by(2) { - /// println!("{}", i); - /// } - /// ``` - /// - /// This prints: - /// - /// ```text - /// 0 - /// 2 - /// 4 - /// 6 - /// 8 - /// 10 + /// let result: Vec<_> = (0...10).step_by(2).collect(); + /// assert_eq!(result, vec![0, 2, 4, 6, 8, 10]); /// ``` #[unstable(feature = "step_by", reason = "recent addition", issue = "27741")] @@ -376,6 +348,10 @@ impl Iterator for StepBy> where } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for StepBy> + where A: Clone, for<'a> &'a A: Add<&'a A, Output = A> {} + #[stable(feature = "rust1", since = "1.0.0")] impl Iterator for StepBy> { type Item = A; @@ -413,6 +389,9 @@ impl Iterator for StepBy> { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for StepBy> {} + #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] @@ -480,6 +459,9 @@ impl Iterator for StepBy> { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for StepBy> {} + macro_rules! 
range_exact_iter_impl { ($($t:ty)*) => ($( #[stable(feature = "rust1", since = "1.0.0")] @@ -538,6 +520,10 @@ impl DoubleEndedIterator for ops::Range where } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for ops::Range + where A: Step, for<'a> &'a A: Add<&'a A, Output = A> {} + #[stable(feature = "rust1", since = "1.0.0")] impl Iterator for ops::RangeFrom where for<'a> &'a A: Add<&'a A, Output = A> @@ -552,6 +538,10 @@ impl Iterator for ops::RangeFrom where } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for ops::RangeFrom + where A: Step, for<'a> &'a A: Add<&'a A, Output = A> {} + #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] impl Iterator for ops::RangeInclusive where for<'a> &'a A: Add<&'a A, Output = A> @@ -651,3 +641,6 @@ impl DoubleEndedIterator for ops::RangeInclusive where } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for ops::RangeInclusive + where A: Step, for<'a> &'a A: Add<&'a A, Output = A> {} diff --git a/src/libcore/iter/sources.rs b/src/libcore/iter/sources.rs index ecd4a78b9e..da346eaf1d 100644 --- a/src/libcore/iter/sources.rs +++ b/src/libcore/iter/sources.rs @@ -8,14 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use clone::Clone; -use default::Default; use fmt; use marker; -use option::Option::{self, Some, None}; use usize; -use super::{DoubleEndedIterator, IntoIterator, Iterator, ExactSizeIterator}; +use super::FusedIterator; /// An iterator that repeats an element endlessly. /// @@ -44,6 +41,9 @@ impl DoubleEndedIterator for Repeat { fn next_back(&mut self) -> Option { Some(self.element.clone()) } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Repeat {} + /// Creates a new iterator that endlessly repeats a single element. /// /// The `repeat()` function repeats a single value over and over and over and @@ -138,6 +138,9 @@ impl ExactSizeIterator for Empty { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Empty {} + // not #[derive] because that adds a Clone bound on T, // which isn't necessary. #[stable(feature = "iter_empty", since = "1.2.0")] @@ -213,6 +216,9 @@ impl ExactSizeIterator for Once { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Once {} + /// Creates an iterator that yields an element exactly once. /// /// This is commonly used to adapt a single value into a [`chain()`] of other diff --git a/src/libcore/iter/traits.rs b/src/libcore/iter/traits.rs index cb509156e3..b55d6f96af 100644 --- a/src/libcore/iter/traits.rs +++ b/src/libcore/iter/traits.rs @@ -7,11 +7,7 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. - -use option::Option::{self, Some}; -use marker::Sized; - -use super::Iterator; +use ops::{Mul, Add}; /// Conversion from an `Iterator`. /// @@ -586,41 +582,34 @@ pub trait Product: Sized { fn product>(iter: I) -> Self; } +// NB: explicitly use Add and Mul here to inherit overflow checks macro_rules! 
integer_sum_product { ($($a:ident)*) => ($( #[stable(feature = "iter_arith_traits", since = "1.12.0")] impl Sum for $a { fn sum>(iter: I) -> $a { - iter.fold(0, |a, b| { - a.checked_add(b).expect("overflow in sum") - }) + iter.fold(0, Add::add) } } #[stable(feature = "iter_arith_traits", since = "1.12.0")] impl Product for $a { fn product>(iter: I) -> $a { - iter.fold(1, |a, b| { - a.checked_mul(b).expect("overflow in product") - }) + iter.fold(1, Mul::mul) } } #[stable(feature = "iter_arith_traits", since = "1.12.0")] impl<'a> Sum<&'a $a> for $a { fn sum>(iter: I) -> $a { - iter.fold(0, |a, b| { - a.checked_add(*b).expect("overflow in sum") - }) + iter.cloned().fold(0, Add::add) } } #[stable(feature = "iter_arith_traits", since = "1.12.0")] impl<'a> Product<&'a $a> for $a { fn product>(iter: I) -> $a { - iter.fold(1, |a, b| { - a.checked_mul(*b).expect("overflow in product") - }) + iter.cloned().fold(1, Mul::mul) } } )*) @@ -660,3 +649,19 @@ macro_rules! float_sum_product { integer_sum_product! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize } float_sum_product! { f32 f64 } + +/// An iterator that always continues to yield `None` when exhausted. +/// +/// Calling next on a fused iterator that has returned `None` once is guaranteed +/// to return `None` again. This trait is should be implemented by all iterators +/// that behave this way because it allows for some significant optimizations. +/// +/// Note: In general, you should not use `FusedIterator` in generic bounds if +/// you need a fused iterator. Instead, you should just call `Iterator::fused()` +/// on the iterator. If the iterator is already fused, the additional `Fuse` +/// wrapper will be a no-op with no performance penalty. +#[unstable(feature = "fused", issue = "35602")] +pub trait FusedIterator: Iterator {} + +#[unstable(feature = "fused", issue = "35602")] +impl<'a, I: FusedIterator + ?Sized> FusedIterator for &'a mut I {} diff --git a/src/libcore/iter_private.rs b/src/libcore/iter_private.rs index effe43cc67..83eeef31ab 100644 --- a/src/libcore/iter_private.rs +++ b/src/libcore/iter_private.rs @@ -9,8 +9,6 @@ // except according to those terms. -use iter::ExactSizeIterator; - /// An iterator whose items are random accessible efficiently /// /// # Safety diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs index 7831464756..28101d21fc 100644 --- a/src/libcore/lib.rs +++ b/src/libcore/lib.rs @@ -42,6 +42,11 @@ //! line. It is up to consumers of this core library to define this panic //! function; it is only required to never return. This requires a `lang` //! attribute named `panic_fmt`. +//! +//! * `rust_eh_personality` - is used by the failure mechanisms of the +//! compiler. This is often mapped to GCC's personality function, but crates +//! which do not trigger a panic can be assured that this function is never +//! called. The `lang` attribute is called `eh_personality`. // Since libcore defines many fundamental lang items, all tests live in a // separate crate, libcoretest, to avoid bizarre issues. 
@@ -62,8 +67,6 @@ #![deny(missing_debug_implementations)] #![cfg_attr(not(stage0), deny(warnings))] -#![cfg_attr(stage0, allow(unused_attributes))] - #![feature(allow_internal_unstable)] #![feature(asm)] #![feature(associated_type_defaults)] @@ -86,10 +89,13 @@ #![feature(specialization)] #![feature(staged_api)] #![feature(unboxed_closures)] -#![feature(question_mark)] +#![cfg_attr(stage0, feature(question_mark))] +#![feature(never_type)] +#![feature(prelude_import)] -// NOTE: remove the cfg_attr next snapshot -#![cfg_attr(not(stage0), feature(never_type))] +#[prelude_import] +#[allow(unused)] +use prelude::v1::*; #[macro_use] mod macros; diff --git a/src/libcore/macros.rs b/src/libcore/macros.rs index c916ad930f..6e08abd346 100644 --- a/src/libcore/macros.rs +++ b/src/libcore/macros.rs @@ -119,6 +119,44 @@ macro_rules! assert_eq { }); } +/// Asserts that two expressions are not equal to each other. +/// +/// On panic, this macro will print the values of the expressions with their +/// debug representations. +/// +/// # Examples +/// +/// ``` +/// let a = 3; +/// let b = 2; +/// assert_ne!(a, b); +/// ``` +#[macro_export] +#[stable(feature = "assert_ne", since = "1.12.0")] +macro_rules! assert_ne { + ($left:expr , $right:expr) => ({ + match (&$left, &$right) { + (left_val, right_val) => { + if *left_val == *right_val { + panic!("assertion failed: `(left != right)` \ + (left: `{:?}`, right: `{:?}`)", left_val, right_val) + } + } + } + }); + ($left:expr , $right:expr, $($arg:tt)*) => ({ + match (&($left), &($right)) { + (left_val, right_val) => { + if *left_val == *right_val { + panic!("assertion failed: `(left != right)` \ + (left: `{:?}`, right: `{:?}`): {}", left_val, right_val, + format_args!($($arg)*)) + } + } + } + }); +} + /// Ensure that a boolean expression is `true` at runtime. /// /// This will invoke the `panic!` macro if the provided expression cannot be @@ -189,10 +227,47 @@ macro_rules! debug_assert_eq { ($($arg:tt)*) => (if cfg!(debug_assertions) { assert_eq!($($arg)*); }) } -/// Helper macro for unwrapping `Result` values while returning early with an -/// error if the value of the expression is `Err`. Can only be used in -/// functions that return `Result` because of the early return of `Err` that -/// it provides. +/// Asserts that two expressions are not equal to each other. +/// +/// On panic, this macro will print the values of the expressions with their +/// debug representations. +/// +/// Unlike `assert_ne!`, `debug_assert_ne!` statements are only enabled in non +/// optimized builds by default. An optimized build will omit all +/// `debug_assert_ne!` statements unless `-C debug-assertions` is passed to the +/// compiler. This makes `debug_assert_ne!` useful for checks that are too +/// expensive to be present in a release build but may be helpful during +/// development. +/// +/// # Examples +/// +/// ``` +/// let a = 3; +/// let b = 2; +/// debug_assert_ne!(a, b); +/// ``` +#[macro_export] +#[stable(feature = "assert_ne", since = "1.12.0")] +macro_rules! debug_assert_ne { + ($($arg:tt)*) => (if cfg!(debug_assertions) { assert_ne!($($arg)*); }) +} + +/// Helper macro for reducing boilerplate code for matching `Result` together +/// with converting downstream errors. +/// +/// Prefer using `?` syntax to `try!`. `?` is built in to the language and is +/// more succinct than `try!`. It is the standard method for error propagation. +/// +/// `try!` matches the given `Result`. In case of the `Ok` variant, the +/// expression has the value of the wrapped value. 
+/// +/// In case of the `Err` variant, it retrieves the inner error. `try!` then +/// performs conversion using `From`. This provides automatic conversion +/// between specialized errors and more general ones. The resulting +/// error is then immediately returned. +/// +/// Because of the early return, `try!` can only be used in functions that +/// return `Result`. /// /// # Examples /// @@ -201,18 +276,28 @@ macro_rules! debug_assert_eq { /// use std::fs::File; /// use std::io::prelude::*; /// -/// fn write_to_file_using_try() -> Result<(), io::Error> { +/// enum MyError { +/// FileWriteError +/// } +/// +/// impl From for MyError { +/// fn from(e: io::Error) -> MyError { +/// MyError::FileWriteError +/// } +/// } +/// +/// fn write_to_file_using_try() -> Result<(), MyError> { /// let mut file = try!(File::create("my_best_friends.txt")); /// try!(file.write_all(b"This is a list of my best friends.")); /// println!("I wrote to the file"); /// Ok(()) /// } /// // This is equivalent to: -/// fn write_to_file_using_match() -> Result<(), io::Error> { +/// fn write_to_file_using_match() -> Result<(), MyError> { /// let mut file = try!(File::create("my_best_friends.txt")); /// match file.write_all(b"This is a list of my best friends.") { /// Ok(v) => v, -/// Err(e) => return Err(e), +/// Err(e) => return Err(From::from(e)), /// } /// println!("I wrote to the file"); /// Ok(()) diff --git a/src/libcore/marker.rs b/src/libcore/marker.rs index 894982abaa..03d8af1563 100644 --- a/src/libcore/marker.rs +++ b/src/libcore/marker.rs @@ -8,24 +8,35 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Primitive traits and marker types representing basic 'kinds' of types. +//! Primitive traits and types representing basic properties of types. //! //! Rust types can be classified in various useful ways according to -//! intrinsic properties of the type. These classifications, often called -//! 'kinds', are represented as traits. +//! their intrinsic properties. These classifications are represented +//! as traits. #![stable(feature = "rust1", since = "1.0.0")] -use clone::Clone; use cmp; -use default::Default; -use option::Option; use hash::Hash; use hash::Hasher; /// Types that can be transferred across thread boundaries. /// -/// This trait is automatically derived when the compiler determines it's appropriate. +/// This trait is automatically implemented when the compiler determines it's +/// appropriate. +/// +/// An example of a non-`Send` type is the reference-counting pointer +/// [`rc::Rc`][rc]. If two threads attempt to clone `Rc`s that point to the same +/// reference-counted value, they might try to update the reference count at the +/// same time, which is [undefined behavior][ub] because `Rc` doesn't use atomic +/// operations. Its cousin [`sync::Arc`][arc] does use atomic operations (incurring +/// some overhead) and thus is `Send`. +/// +/// See [the Nomicon](../../nomicon/send-and-sync.html) for more details. +/// +/// [rc]: ../../std/rc/struct.Rc.html +/// [arc]: ../../std/sync/struct.Arc.html +/// [ub]: ../../reference.html#behavior-considered-undefined #[stable(feature = "rust1", since = "1.0.0")] #[lang = "send"] #[rustc_on_unimplemented = "`{Self}` cannot be sent between threads safely"] @@ -41,10 +52,10 @@ impl !Send for *const T { } #[stable(feature = "rust1", since = "1.0.0")] impl !Send for *mut T { } -/// Types with a constant size known at compile-time. +/// Types with a constant size known at compile time. 
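The macros.rs hunk above adds `assert_ne!`/`debug_assert_ne!` and reframes `try!` around the automatic `From` conversion that `?` also performs. A small sketch combining the two; `ConfigError` and `parse_port` are illustrative names, not part of the patch:

```rust
use std::num::ParseIntError;

#[derive(Debug)]
enum ConfigError {
    BadPort(ParseIntError),
}

// The `From` impl is what lets `?` (and `try!`) convert the underlying
// parse error into our error type while propagating it.
impl From<ParseIntError> for ConfigError {
    fn from(e: ParseIntError) -> ConfigError {
        ConfigError::BadPort(e)
    }
}

fn parse_port(s: &str) -> Result<u16, ConfigError> {
    // `?` unwraps the `Ok` value or returns early with `Err(From::from(e))`;
    // `try!(...)` expands to the same early return.
    let port = s.trim().parse::<u16>()?;
    Ok(port)
}

fn main() {
    assert_eq!(parse_port(" 8080 ").unwrap(), 8080);

    // `assert_ne!` panics, printing both values, if the two sides are equal.
    assert_ne!(parse_port("80").unwrap(), 8080);

    // The error branch exercises the automatic `From` conversion.
    assert!(parse_port("not a port").is_err());
}
```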
/// -/// All type parameters which can be bounded have an implicit bound of `Sized`. The special syntax -/// `?Sized` can be used to remove this bound if it is not appropriate. +/// All type parameters have an implicit bound of `Sized`. The special syntax +/// `?Sized` can be used to remove this bound if it's not appropriate. /// /// ``` /// # #![allow(dead_code)] @@ -54,6 +65,26 @@ impl !Send for *mut T { } /// // struct FooUse(Foo<[i32]>); // error: Sized is not implemented for [i32] /// struct BarUse(Bar<[i32]>); // OK /// ``` +/// +/// The one exception is the implicit `Self` type of a trait, which does not +/// get an implicit `Sized` bound. This is because a `Sized` bound prevents +/// the trait from being used to form a [trait object]: +/// +/// ``` +/// # #![allow(unused_variables)] +/// trait Foo { } +/// trait Bar: Sized { } +/// +/// struct Impl; +/// impl Foo for Impl { } +/// impl Bar for Impl { } +/// +/// let x: &Foo = &Impl; // OK +/// // let y: &Bar = &Impl; // error: the trait `Bar` cannot +/// // be made into an object +/// ``` +/// +/// [trait object]: ../../book/trait-objects.html #[stable(feature = "rust1", since = "1.0.0")] #[lang = "sized"] #[rustc_on_unimplemented = "`{Self}` does not have a constant size known at compile-time"] @@ -62,14 +93,27 @@ pub trait Sized { // Empty. } -/// Types that can be "unsized" to a dynamically sized type. +/// Types that can be "unsized" to a dynamically-sized type. +/// +/// For example, the sized array type `[i8; 2]` implements `Unsize<[i8]>` and +/// `Unsize`. +/// +/// All implementations of `Unsize` are provided automatically by the compiler. +/// +/// `Unsize` is used along with [`ops::CoerceUnsized`][coerceunsized] to allow +/// "user-defined" containers such as [`rc::Rc`][rc] to contain dynamically-sized +/// types. See the [DST coercion RFC][RFC982] for more details. +/// +/// [coerceunsized]: ../ops/trait.CoerceUnsized.html +/// [rc]: ../../std/rc/struct.Rc.html +/// [RFC982]: https://github.com/rust-lang/rfcs/blob/master/text/0982-dst-coercion.md #[unstable(feature = "unsize", issue = "27732")] #[lang="unsize"] pub trait Unsize { // Empty. } -/// Types that can be copied by simply copying bits (i.e. `memcpy`). +/// Types whose values can be duplicated simply by copying bits. /// /// By default, variable bindings have 'move semantics.' In other /// words: @@ -90,7 +134,8 @@ pub trait Unsize { /// However, if a type implements `Copy`, it instead has 'copy semantics': /// /// ``` -/// // we can just derive a `Copy` implementation +/// // We can derive a `Copy` implementation. `Clone` is also required, as it's +/// // a supertrait of `Copy`. /// #[derive(Debug, Copy, Clone)] /// struct Foo; /// @@ -103,13 +148,59 @@ pub trait Unsize { /// println!("{:?}", x); // A-OK! /// ``` /// -/// It's important to note that in these two examples, the only difference is if you are allowed to -/// access `x` after the assignment: a move is also a bitwise copy under the hood. +/// It's important to note that in these two examples, the only difference is whether you +/// are allowed to access `x` after the assignment. Under the hood, both a copy and a move +/// can result in bits being copied in memory, although this is sometimes optimized away. +/// +/// ## How can I implement `Copy`? +/// +/// There are two ways to implement `Copy` on your type. 
The simplest is to use `derive`: +/// +/// ``` +/// #[derive(Copy, Clone)] +/// struct MyStruct; +/// ``` +/// +/// You can also implement `Copy` and `Clone` manually: +/// +/// ``` +/// struct MyStruct; +/// +/// impl Copy for MyStruct { } +/// +/// impl Clone for MyStruct { +/// fn clone(&self) -> MyStruct { +/// *self +/// } +/// } +/// ``` +/// +/// There is a small difference between the two: the `derive` strategy will also place a `Copy` +/// bound on type parameters, which isn't always desired. +/// +/// ## What's the difference between `Copy` and `Clone`? +/// +/// Copies happen implicitly, for example as part of an assignment `y = x`. The behavior of +/// `Copy` is not overloadable; it is always a simple bit-wise copy. +/// +/// Cloning is an explicit action, `x.clone()`. The implementation of [`Clone`][clone] can +/// provide any type-specific behavior necessary to duplicate values safely. For example, +/// the implementation of `Clone` for [`String`][string] needs to copy the pointed-to string +/// buffer in the heap. A simple bitwise copy of `String` values would merely copy the +/// pointer, leading to a double free down the line. For this reason, `String` is `Clone` +/// but not `Copy`. +/// +/// `Clone` is a supertrait of `Copy`, so everything which is `Copy` must also implement +/// `Clone`. If a type is `Copy` then its `Clone` implementation need only return `*self` +/// (see the example above). +/// +/// [clone]: ../clone/trait.Clone.html +/// [string]: ../../std/string/struct.String.html /// /// ## When can my type be `Copy`? /// /// A type can implement `Copy` if all of its components implement `Copy`. For example, this -/// `struct` can be `Copy`: +/// struct can be `Copy`: /// /// ``` /// # #[allow(dead_code)] @@ -119,7 +210,8 @@ pub trait Unsize { /// } /// ``` /// -/// A `struct` can be `Copy`, and `i32` is `Copy`, so therefore, `Point` is eligible to be `Copy`. +/// A struct can be `Copy`, and `i32` is `Copy`, therefore `Point` is eligible to be `Copy`. +/// By contrast, consider /// /// ``` /// # #![allow(dead_code)] @@ -129,107 +221,114 @@ pub trait Unsize { /// } /// ``` /// -/// The `PointList` `struct` cannot implement `Copy`, because `Vec` is not `Copy`. If we +/// The struct `PointList` cannot implement `Copy`, because [`Vec`] is not `Copy`. If we /// attempt to derive a `Copy` implementation, we'll get an error: /// /// ```text /// the trait `Copy` may not be implemented for this type; field `points` does not implement `Copy` /// ``` /// -/// ## When can my type _not_ be `Copy`? +/// ## When *can't* my type be `Copy`? /// /// Some types can't be copied safely. For example, copying `&mut T` would create an aliased -/// mutable reference, and copying `String` would result in two attempts to free the same buffer. -/// -/// Generalizing the latter case, any type implementing `Drop` can't be `Copy`, because it's -/// managing some resource besides its own `size_of::()` bytes. -/// -/// ## What if I derive `Copy` on a type that can't? -/// -/// If you try to derive `Copy` on a struct or enum, you will get a compile-time error. -/// Specifically, with structs you'll get [E0204](https://doc.rust-lang.org/error-index.html#E0204) -/// and with enums you'll get [E0205](https://doc.rust-lang.org/error-index.html#E0205). -/// -/// ## When should my type be `Copy`? -/// -/// Generally speaking, if your type _can_ implement `Copy`, it should. 
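The note above that `derive` also places a `Copy` bound on type parameters is easiest to see with a phantom parameter. A sketch of the difference; `Tagged` and `ManuallyTagged` are illustrative types, not from the patch:

```rust
use std::marker::PhantomData;

// With `derive`, the generated impls are `impl<T: Copy> Copy for Tagged<T>`
// and `impl<T: Clone> Clone for Tagged<T>`, even though the fields are
// copyable for *any* `T`.
#[derive(Copy, Clone)]
struct Tagged<T> {
    id: u32,
    _kind: PhantomData<T>,
}

// A manual impl can drop that extra bound, because `u32` and `PhantomData<T>`
// are `Copy` regardless of `T`.
struct ManuallyTagged<T> {
    id: u32,
    _kind: PhantomData<T>,
}

impl<T> Copy for ManuallyTagged<T> {}
impl<T> Clone for ManuallyTagged<T> {
    fn clone(&self) -> Self { *self }
}

fn main() {
    // Fine: `u32` is `Copy`, so the derived bound is satisfied.
    let t = Tagged::<u32> { id: 1, _kind: PhantomData };
    let t2 = t; // copied; `t` is still usable

    // With the derive, `Tagged<String>` would *not* be `Copy` (because
    // `String` isn't), so the same bindings would move instead of copy.
    // The manual impl copies for any `T`, including `String`.
    let m = ManuallyTagged::<String> { id: 2, _kind: PhantomData };
    let m2 = m; // copied

    println!("{} {} {} {}", t.id, t2.id, m.id, m2.id);
}
```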
There's one important thing -/// to consider though: if you think your type may _not_ be able to implement `Copy` in the future, -/// then it might be prudent to not implement `Copy`. This is because removing `Copy` is a breaking -/// change: that second example would fail to compile if we made `Foo` non-`Copy`. -/// -/// ## Derivable +/// mutable reference. Copying [`String`] would duplicate responsibility for managing the `String`'s +/// buffer, leading to a double free. /// -/// This trait can be used with `#[derive]` if all of its components implement `Copy` and the type -/// implements `Clone`. The implementation will copy the bytes of each field using `memcpy`. +/// Generalizing the latter case, any type implementing [`Drop`] can't be `Copy`, because it's +/// managing some resource besides its own [`size_of::()`] bytes. /// -/// ## How can I implement `Copy`? +/// If you try to implement `Copy` on a struct or enum containing non-`Copy` data, you will get a +/// compile-time error. Specifically, with structs you'll get [E0204] and with enums you'll get +/// [E0205]. /// -/// There are two ways to implement `Copy` on your type: +/// [E0204]: https://doc.rust-lang.org/error-index.html#E0204 +/// [E0205]: https://doc.rust-lang.org/error-index.html#E0205 /// -/// ``` -/// #[derive(Copy, Clone)] -/// struct MyStruct; -/// ``` +/// ## When *should* my type be `Copy`? /// -/// and -/// -/// ``` -/// struct MyStruct; -/// impl Copy for MyStruct {} -/// impl Clone for MyStruct { fn clone(&self) -> MyStruct { *self } } -/// ``` +/// Generally speaking, if your type _can_ implement `Copy`, it should. Keep in mind, though, +/// that implementing `Copy` is part of the public API of your type. If the type might become +/// non-`Copy` in the future, it could be prudent to omit the `Copy` implementation now, to +/// avoid a breaking API change. /// -/// There is a small difference between the two: the `derive` strategy will also place a `Copy` -/// bound on type parameters, which isn't always desired. +/// [`Vec`]: ../../std/vec/struct.Vec.html +/// [`String`]: ../../std/string/struct.String.html +/// [`Drop`]: ../../std/ops/trait.Drop.html +/// [`size_of::()`]: ../../std/mem/fn.size_of.html #[stable(feature = "rust1", since = "1.0.0")] #[lang = "copy"] pub trait Copy : Clone { // Empty. } -/// Types that can be safely shared between threads when aliased. +/// Types for which it is safe to share references between threads. +/// +/// This trait is automatically implemented when the compiler determines +/// it's appropriate. /// /// The precise definition is: a type `T` is `Sync` if `&T` is -/// thread-safe. In other words, there is no possibility of data races -/// when passing `&T` references between threads. -/// -/// As one would expect, primitive types like `u8` and `f64` are all -/// `Sync`, and so are simple aggregate types containing them (like -/// tuples, structs and enums). More instances of basic `Sync` types -/// include "immutable" types like `&T` and those with simple -/// inherited mutability, such as `Box`, `Vec` and most other -/// collection types. (Generic parameters need to be `Sync` for their -/// container to be `Sync`.) -/// -/// A somewhat surprising consequence of the definition is `&mut T` is -/// `Sync` (if `T` is `Sync`) even though it seems that it might -/// provide unsynchronized mutation. 
The trick is a mutable reference -/// stored in an aliasable reference (that is, `& &mut T`) becomes -/// read-only, as if it were a `& &T`, hence there is no risk of a data -/// race. +/// [`Send`][send]. In other words, if there is no possibility of +/// [undefined behavior][ub] (including data races) when passing +/// `&T` references between threads. +/// +/// As one would expect, primitive types like [`u8`][u8] and [`f64`][f64] +/// are all `Sync`, and so are simple aggregate types containing them, +/// like tuples, structs and enums. More examples of basic `Sync` +/// types include "immutable" types like `&T`, and those with simple +/// inherited mutability, such as [`Box`][box], [`Vec`][vec] and +/// most other collection types. (Generic parameters need to be `Sync` +/// for their container to be `Sync`.) +/// +/// A somewhat surprising consequence of the definition is that `&mut T` +/// is `Sync` (if `T` is `Sync`) even though it seems like that might +/// provide unsynchronized mutation. The trick is that a mutable +/// reference behind a shared reference (that is, `& &mut T`) +/// becomes read-only, as if it were a `& &T`. Hence there is no risk +/// of a data race. /// /// Types that are not `Sync` are those that have "interior -/// mutability" in a non-thread-safe way, such as `Cell` and `RefCell` -/// in `std::cell`. These types allow for mutation of their contents -/// even when in an immutable, aliasable slot, e.g. the contents of -/// `&Cell` can be `.set`, and do not ensure data races are -/// impossible, hence they cannot be `Sync`. A higher level example -/// of a non-`Sync` type is the reference counted pointer -/// `std::rc::Rc`, because any reference `&Rc` can clone a new -/// reference, which modifies the reference counts in a non-atomic -/// way. -/// -/// For cases when one does need thread-safe interior mutability, -/// types like the atomics in `std::sync` and `Mutex` & `RWLock` in -/// the `sync` crate do ensure that any mutation cannot cause data -/// races. Hence these types are `Sync`. +/// mutability" in a non-thread-safe form, such as [`cell::Cell`][cell] +/// and [`cell::RefCell`][refcell]. These types allow for mutation of +/// their contents even through an immutable, shared reference. For +/// example the `set` method on `Cell` takes `&self`, so it requires +/// only a shared reference `&Cell`. The method performs no +/// synchronization, thus `Cell` cannot be `Sync`. /// -/// Any types with interior mutability must also use the `std::cell::UnsafeCell` -/// wrapper around the value(s) which can be mutated when behind a `&` -/// reference; not doing this is undefined behavior (for example, -/// `transmute`-ing from `&T` to `&mut T` is invalid). +/// Another example of a non-`Sync` type is the reference-counting +/// pointer [`rc::Rc`][rc]. Given any reference `&Rc`, you can clone +/// a new `Rc`, modifying the reference counts in a non-atomic way. /// -/// This trait is automatically derived when the compiler determines it's appropriate. +/// For cases when one does need thread-safe interior mutability, +/// Rust provides [atomic data types], as well as explicit locking via +/// [`sync::Mutex`][mutex] and [`sync::RWLock`][rwlock]. These types +/// ensure that any mutation cannot cause data races, hence the types +/// are `Sync`. Likewise, [`sync::Arc`][arc] provides a thread-safe +/// analogue of `Rc`. 
+/// +/// Any types with interior mutability must also use the +/// [`cell::UnsafeCell`][unsafecell] wrapper around the value(s) which +/// can be mutated through a shared reference. Failing to doing this is +/// [undefined behavior][ub]. For example, [`transmute`][transmute]-ing +/// from `&T` to `&mut T` is invalid. +/// +/// See [the Nomicon](../../nomicon/send-and-sync.html) for more +/// details about `Sync`. +/// +/// [send]: trait.Send.html +/// [u8]: ../../std/primitive.u8.html +/// [f64]: ../../std/primitive.f64.html +/// [box]: ../../std/boxed/struct.Box.html +/// [vec]: ../../std/vec/struct.Vec.html +/// [cell]: ../cell/struct.Cell.html +/// [refcell]: ../cell/struct.RefCell.html +/// [rc]: ../../std/rc/struct.Rc.html +/// [arc]: ../../std/sync/struct.Arc.html +/// [atomic data types]: ../sync/atomic/index.html +/// [mutex]: ../../std/sync/struct.Mutex.html +/// [rwlock]: ../../std/sync/struct.RwLock.html +/// [unsafecell]: ../cell/struct.UnsafeCell.html +/// [ub]: ../../reference.html#behavior-considered-undefined +/// [transmute]: ../../std/mem/fn.transmute.html #[stable(feature = "rust1", since = "1.0.0")] #[lang = "sync"] #[rustc_on_unimplemented = "`{Self}` cannot be shared between threads safely"] @@ -298,29 +397,30 @@ macro_rules! impls{ ) } -/// `PhantomData` allows you to describe that a type acts as if it stores a value of type `T`, -/// even though it does not. This allows you to inform the compiler about certain safety properties -/// of your code. +/// Zero-sized type used to mark things that "act like" they own a `T`. /// -/// For a more in-depth explanation of how to use `PhantomData`, please see [the Nomicon]. +/// Adding a `PhantomData` field to your type tells the compiler that your +/// type acts as though it stores a value of type `T`, even though it doesn't +/// really. This information is used when computing certain safety properties. /// -/// [the Nomicon]: ../../nomicon/phantom-data.html +/// For a more in-depth explanation of how to use `PhantomData`, please see +/// [the Nomicon](../../nomicon/phantom-data.html). /// /// # A ghastly note 👻👻👻 /// -/// Though they both have scary names, `PhantomData` and 'phantom types' are related, but not -/// identical. Phantom types are a more general concept that don't require `PhantomData` to -/// implement, but `PhantomData` is the most common way to implement them in a correct manner. +/// Though they both have scary names, `PhantomData` and 'phantom types' are +/// related, but not identical. A phantom type parameter is simply a type +/// parameter which is never used. In Rust, this often causes the compiler to +/// complain, and the solution is to add a "dummy" use by way of `PhantomData`. /// /// # Examples /// -/// ## Unused lifetime parameter +/// ## Unused lifetime parameters /// -/// Perhaps the most common time that `PhantomData` is required is -/// with a struct that has an unused lifetime parameter, typically as -/// part of some unsafe code. For example, here is a struct `Slice` -/// that has two pointers of type `*const T`, presumably pointing into -/// an array somewhere: +/// Perhaps the most common use case for `PhantomData` is a struct that has an +/// unused lifetime parameter, typically as part of some unsafe code. For +/// example, here is a struct `Slice` that has two pointers of type `*const T`, +/// presumably pointing into an array somewhere: /// /// ```ignore /// struct Slice<'a, T> { @@ -334,7 +434,7 @@ macro_rules! 
impls{ /// intent is not expressed in the code, since there are no uses of /// the lifetime `'a` and hence it is not clear what data it applies /// to. We can correct this by telling the compiler to act *as if* the -/// `Slice` struct contained a borrowed reference `&'a T`: +/// `Slice` struct contained a reference `&'a T`: /// /// ``` /// use std::marker::PhantomData; @@ -343,29 +443,53 @@ macro_rules! impls{ /// struct Slice<'a, T: 'a> { /// start: *const T, /// end: *const T, -/// phantom: PhantomData<&'a T> +/// phantom: PhantomData<&'a T>, /// } /// ``` /// -/// This also in turn requires that we annotate `T:'a`, indicating -/// that `T` is a type that can be borrowed for the lifetime `'a`. +/// This also in turn requires the annotation `T: 'a`, indicating +/// that any references in `T` are valid over the lifetime `'a`. +/// +/// When initializing a `Slice` you simply provide the value +/// `PhantomData` for the field `phantom`: +/// +/// ``` +/// # #![allow(dead_code)] +/// # use std::marker::PhantomData; +/// # struct Slice<'a, T: 'a> { +/// # start: *const T, +/// # end: *const T, +/// # phantom: PhantomData<&'a T>, +/// # } +/// fn borrow_vec<'a, T>(vec: &'a Vec) -> Slice<'a, T> { +/// let ptr = vec.as_ptr(); +/// Slice { +/// start: ptr, +/// end: unsafe { ptr.offset(vec.len() as isize) }, +/// phantom: PhantomData, +/// } +/// } +/// ``` /// /// ## Unused type parameters /// -/// It sometimes happens that there are unused type parameters that +/// It sometimes happens that you have unused type parameters which /// indicate what type of data a struct is "tied" to, even though that /// data is not actually found in the struct itself. Here is an -/// example where this arises when handling external resources over a -/// foreign function interface. `PhantomData` can prevent -/// mismatches by enforcing types in the method implementations: +/// example where this arises with [FFI]. The foreign interface uses +/// handles of type `*mut ()` to refer to Rust values of different +/// types. We track the Rust type using a phantom type parameter on +/// the struct `ExternalResource` which wraps a handle. +/// +/// [FFI]: ../../book/ffi.html /// /// ``` /// # #![allow(dead_code)] -/// # trait ResType { fn foo(&self); } +/// # trait ResType { } /// # struct ParamType; /// # mod foreign_lib { -/// # pub fn new(_: usize) -> *mut () { 42 as *mut () } -/// # pub fn do_stuff(_: *mut (), _: usize) {} +/// # pub fn new(_: usize) -> *mut () { 42 as *mut () } +/// # pub fn do_stuff(_: *mut (), _: usize) {} /// # } /// # fn convert_params(_: ParamType) -> usize { 42 } /// use std::marker::PhantomData; @@ -392,21 +516,20 @@ macro_rules! impls{ /// } /// ``` /// -/// ## Indicating ownership +/// ## Ownership and the drop check /// -/// Adding a field of type `PhantomData` also indicates that your -/// struct owns data of type `T`. This in turn implies that when your -/// struct is dropped, it may in turn drop one or more instances of -/// the type `T`, though that may not be apparent from the other -/// structure of the type itself. This is commonly necessary if the -/// structure is using a raw pointer like `*mut T` whose referent -/// may be dropped when the type is dropped, as a `*mut T` is -/// otherwise not treated as owned. +/// Adding a field of type `PhantomData` indicates that your +/// type owns data of type `T`. This in turn implies that when your +/// type is dropped, it may drop one or more instances of the type +/// `T`. This has bearing on the Rust compiler's [drop check] +/// analysis. 
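A minimal sketch of the ownership and drop-check point above: a type that owns its `T` only through a raw pointer carries a `PhantomData<T>` field so the compiler knows that dropping the wrapper may drop a `T`. `BoxLike` is an illustrative type, not from the patch; it round-trips through `Box` to keep the allocation details out of the way:

```rust
use std::marker::PhantomData;

// A minimal owning wrapper around a raw pointer. The `PhantomData<T>` field
// tells the compiler (and its drop-check analysis) that dropping a
// `BoxLike<T>` may drop a `T`, even though no `T` appears in the fields.
struct BoxLike<T> {
    ptr: *mut T,
    _owns: PhantomData<T>,
}

impl<T> BoxLike<T> {
    fn new(value: T) -> BoxLike<T> {
        BoxLike {
            ptr: Box::into_raw(Box::new(value)),
            _owns: PhantomData,
        }
    }

    fn get(&self) -> &T {
        unsafe { &*self.ptr }
    }
}

impl<T> Drop for BoxLike<T> {
    fn drop(&mut self) {
        // Reconstitute the `Box` so the `T` and its heap allocation are freed.
        unsafe { drop(Box::from_raw(self.ptr)); }
    }
}

fn main() {
    let b = BoxLike::new(String::from("hello"));
    assert_eq!(b.get(), "hello");
} // `b` is dropped here, freeing the `String`
```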
/// /// If your struct does not in fact *own* the data of type `T`, it is /// better to use a reference type, like `PhantomData<&'a T>` /// (ideally) or `PhantomData<*const T>` (if no lifetime applies), so /// as not to indicate ownership. +/// +/// [drop check]: ../../nomicon/dropck.html #[lang = "phantom_data"] #[stable(feature = "rust1", since = "1.0.0")] pub struct PhantomData; @@ -414,8 +537,6 @@ pub struct PhantomData; impls! { PhantomData } mod impls { - use super::{Send, Sync, Sized}; - #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<'a, T: Sync + ?Sized> Send for &'a T {} #[stable(feature = "rust1", since = "1.0.0")] @@ -424,10 +545,13 @@ mod impls { /// Types that can be reflected over. /// -/// This trait is implemented for all types. Its purpose is to ensure -/// that when you write a generic function that will employ -/// reflection, that must be reflected (no pun intended) in the -/// generic bounds of that function. Here is an example: +/// By "reflection" we mean use of the [`Any`][any] trait, or related +/// machinery such as [`TypeId`][typeid]. +/// +/// `Reflect` is implemented for all types. Its purpose is to ensure +/// that when you write a generic function that will employ reflection, +/// that must be reflected (no pun intended) in the generic bounds of +/// that function. /// /// ``` /// #![feature(reflect_marker)] @@ -441,25 +565,29 @@ mod impls { /// } /// ``` /// -/// Without the declaration `T: Reflect`, `foo` would not type check -/// (note: as a matter of style, it would be preferable to write -/// `T: Any`, because `T: Any` implies `T: Reflect` and `T: 'static`, but -/// we use `Reflect` here to show how it works). The `Reflect` bound -/// thus serves to alert `foo`'s caller to the fact that `foo` may -/// behave differently depending on whether `T = u32` or not. In -/// particular, thanks to the `Reflect` bound, callers know that a -/// function declared like `fn bar(...)` will always act in -/// precisely the same way no matter what type `T` is supplied, -/// because there are no bounds declared on `T`. (The ability for a -/// caller to reason about what a function may do based solely on what -/// generic bounds are declared is often called the ["parametricity -/// property"][1].) -/// -/// [1]: http://en.wikipedia.org/wiki/Parametricity +/// Without the bound `T: Reflect`, `foo` would not typecheck. (As +/// a matter of style, it would be preferable to write `T: Any`, +/// because `T: Any` implies `T: Reflect` and `T: 'static`, but we +/// use `Reflect` here for illustrative purposes.) +/// +/// The `Reflect` bound serves to alert `foo`'s caller to the +/// fact that `foo` may behave differently depending on whether +/// `T` is `u32` or not. The ability for a caller to reason about what +/// a function may do based solely on what generic bounds are declared +/// is often called the "[parametricity property][param]". Despite the +/// use of `Reflect`, Rust lacks true parametricity because a generic +/// function can, at the very least, call [`mem::size_of`][size_of] +/// without employing any trait bounds whatsoever. 
+/// +/// [any]: ../any/trait.Any.html +/// [typeid]: ../any/struct.TypeId.html +/// [param]: http://en.wikipedia.org/wiki/Parametricity +/// [size_of]: ../mem/fn.size_of.html #[rustc_reflect_like] #[unstable(feature = "reflect_marker", reason = "requires RFC and more experience", issue = "27749")] +#[rustc_deprecated(since = "1.14.0", reason = "Specialization makes parametricity impossible")] #[rustc_on_unimplemented = "`{Self}` does not implement `Any`; \ ensure all type parameters are bounded by `Any`"] pub trait Reflect {} @@ -467,4 +595,6 @@ pub trait Reflect {} #[unstable(feature = "reflect_marker", reason = "requires RFC and more experience", issue = "27749")] +#[rustc_deprecated(since = "1.14.0", reason = "Specialization makes parametricity impossible")] +#[allow(deprecated)] impl Reflect for .. { } diff --git a/src/libcore/mem.rs b/src/libcore/mem.rs index 5c2179ccf3..d3b8a60b79 100644 --- a/src/libcore/mem.rs +++ b/src/libcore/mem.rs @@ -15,61 +15,45 @@ #![stable(feature = "rust1", since = "1.0.0")] -use marker::Sized; use intrinsics; use ptr; #[stable(feature = "rust1", since = "1.0.0")] pub use intrinsics::transmute; -/// Leaks a value into the void, consuming ownership and never running its -/// destructor. +/// Leaks a value: takes ownership and "forgets" about the value **without running +/// its destructor**. /// -/// This function will take ownership of its argument, but is distinct from the -/// `mem::drop` function in that it **does not run the destructor**, leaking the -/// value and any resources that it owns. +/// Any resources the value manages, such as heap memory or a file handle, will linger +/// forever in an unreachable state. /// -/// There's only a few reasons to use this function. They mainly come -/// up in unsafe code or FFI code. -/// -/// * You have an uninitialized value, perhaps for performance reasons, and -/// need to prevent the destructor from running on it. -/// * You have two copies of a value (like when writing something like -/// [`mem::swap`][swap]), but need the destructor to only run once to -/// prevent a double `free`. -/// * Transferring resources across [FFI][ffi] boundaries. -/// -/// [swap]: fn.swap.html -/// [ffi]: ../../book/ffi.html +/// If you want to dispose of a value properly, running its destructor, see +/// [`mem::drop`][drop]. /// /// # Safety /// -/// This function is not marked as `unsafe` as Rust does not guarantee that the -/// `Drop` implementation for a value will always run. Note, however, that -/// leaking resources such as memory or I/O objects is likely not desired, so -/// this function is only recommended for specialized use cases. +/// `forget` is not marked as `unsafe`, because Rust's safety guarantees +/// do not include a guarantee that destructors will always run. For example, +/// a program can create a reference cycle using [`Rc`][rc], or call +/// [`process:exit`][exit] to exit without running destructors. Thus, allowing +/// `mem::forget` from safe code does not fundamentally change Rust's safety +/// guarantees. /// -/// The safety of this function implies that when writing `unsafe` code -/// yourself care must be taken when leveraging a destructor that is required to -/// run to preserve memory safety. There are known situations where the -/// destructor may not run (such as if ownership of the object with the -/// destructor is returned) which must be taken into account. 
+/// That said, leaking resources such as memory or I/O objects is usually undesirable, +/// so `forget` is only recommended for specialized use cases like those shown below. /// -/// # Other forms of Leakage +/// Because forgetting a value is allowed, any `unsafe` code you write must +/// allow for this possibility. You cannot return a value and expect that the +/// caller will necessarily run the value's destructor. /// -/// It's important to point out that this function is not the only method by -/// which a value can be leaked in safe Rust code. Other known sources of -/// leakage are: +/// [rc]: ../../std/rc/struct.Rc.html +/// [exit]: ../../std/process/fn.exit.html /// -/// * `Rc` and `Arc` cycles -/// * `mpsc::{Sender, Receiver}` cycles (they use `Arc` internally) -/// * Panicking destructors are likely to leak local resources -/// -/// # Example +/// # Examples /// /// Leak some heap memory by never deallocating it: /// -/// ```rust +/// ``` /// use std::mem; /// /// let heap_memory = Box::new(3); @@ -78,7 +62,7 @@ pub use intrinsics::transmute; /// /// Leak an I/O object, never closing the file: /// -/// ```rust,no_run +/// ```no_run /// use std::mem; /// use std::fs::File; /// @@ -86,9 +70,43 @@ pub use intrinsics::transmute; /// mem::forget(file); /// ``` /// -/// The `mem::swap` function uses `mem::forget` to good effect: +/// The practical use cases for `forget` are rather specialized and mainly come +/// up in unsafe or FFI code. +/// +/// ## Use case 1 +/// +/// You have created an uninitialized value using [`mem::uninitialized`][uninit]. +/// You must either initialize or `forget` it on every computation path before +/// Rust drops it automatically, like at the end of a scope or after a panic. +/// Running the destructor on an uninitialized value would be [undefined behavior][ub]. +/// +/// ``` +/// use std::mem; +/// use std::ptr; +/// +/// # let some_condition = false; +/// unsafe { +/// let mut uninit_vec: Vec = mem::uninitialized(); +/// +/// if some_condition { +/// // Initialize the variable. +/// ptr::write(&mut uninit_vec, Vec::new()); +/// } else { +/// // Forget the uninitialized value so its destructor doesn't run. +/// mem::forget(uninit_vec); +/// } +/// } +/// ``` +/// +/// ## Use case 2 +/// +/// You have duplicated the bytes making up a value, without doing a proper +/// [`Clone`][clone]. You need the value's destructor to run only once, +/// because a double `free` is undefined behavior. /// -/// ```rust +/// An example is the definition of [`mem::swap`][swap] in this module: +/// +/// ``` /// use std::mem; /// use std::ptr; /// @@ -110,6 +128,41 @@ pub use intrinsics::transmute; /// } /// } /// ``` +/// +/// ## Use case 3 +/// +/// You are transferring ownership across a [FFI] boundary to code written in +/// another language. You need to `forget` the value on the Rust side because Rust +/// code is no longer responsible for it. +/// +/// ```no_run +/// use std::mem; +/// +/// extern "C" { +/// fn my_c_function(x: *const u32); +/// } +/// +/// let x: Box = Box::new(3); +/// +/// // Transfer ownership into C code. +/// unsafe { +/// my_c_function(&*x); +/// } +/// mem::forget(x); +/// ``` +/// +/// In this case, C code must call back into Rust to free the object. Calling C's `free` +/// function on a [`Box`][box] is *not* safe! Also, `Box` provides an [`into_raw`][into_raw] +/// method which is the preferred way to do this in practice. 
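As the FFI use case above notes, `Box::into_raw` is the preferred way to hand ownership to foreign code rather than `mem::forget`; pairing it with `Box::from_raw` later lets Rust reclaim and drop the value. A minimal sketch of that round trip, with the call into `my_c_function` left as a comment so the example stays self-contained:

```rust
fn main() {
    let value: Box<u32> = Box::new(3);

    // Instead of `mem::forget`, convert the `Box` into a raw pointer.
    // Ownership is now "manual": nothing frees the allocation until we
    // explicitly reclaim it.
    let raw: *mut u32 = Box::into_raw(value);

    // ... here the pointer would be handed to foreign code, e.g.
    // `unsafe { my_c_function(raw) }` ...

    // When the foreign side is done with it, Rust reclaims ownership and the
    // normal destructor runs again.
    let value = unsafe { Box::from_raw(raw) };
    assert_eq!(*value, 3);
}
```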
+/// +/// [drop]: fn.drop.html +/// [uninit]: fn.uninitialized.html +/// [clone]: ../clone/trait.Clone.html +/// [swap]: fn.swap.html +/// [FFI]: ../../book/ffi.html +/// [box]: ../../std/boxed/struct.Box.html +/// [into_raw]: ../../std/boxed/struct.Box.html#method.into_raw +/// [ub]: ../../reference.html#behavior-considered-undefined #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn forget(t: T) { @@ -134,7 +187,14 @@ pub fn size_of() -> usize { unsafe { intrinsics::size_of::() } } -/// Returns the size of the given value in bytes. +/// Returns the size of the pointed-to value in bytes. +/// +/// This is usually the same as `size_of::()`. However, when `T` *has* no +/// statically known size, e.g. a slice [`[T]`][slice] or a [trait object], +/// then `size_of_val` can be used to get the dynamically-known size. +/// +/// [slice]: ../../std/primitive.slice.html +/// [trait object]: ../../book/trait-objects.html /// /// # Examples /// @@ -142,6 +202,10 @@ pub fn size_of() -> usize { /// use std::mem; /// /// assert_eq!(4, mem::size_of_val(&5i32)); +/// +/// let x: [u8; 13] = [0; 13]; +/// let y: &[u8] = &x; +/// assert_eq!(13, mem::size_of_val(y)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] @@ -149,10 +213,14 @@ pub fn size_of_val(val: &T) -> usize { unsafe { intrinsics::size_of_val(val) } } -/// Returns the ABI-required minimum alignment of a type +/// Returns the [ABI]-required minimum alignment of a type. +/// +/// Every valid address of a value of the type `T` must be a multiple of this number. /// /// This is the alignment used for struct fields. It may be smaller than the preferred alignment. /// +/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface +/// /// # Examples /// /// ``` @@ -168,7 +236,11 @@ pub fn min_align_of() -> usize { unsafe { intrinsics::min_align_of::() } } -/// Returns the ABI-required minimum alignment of the type of the value that `val` points to +/// Returns the [ABI]-required minimum alignment of the type of the value that `val` points to. +/// +/// Every valid address of a value of the type `T` must be a multiple of this number. +/// +/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface /// /// # Examples /// @@ -185,10 +257,14 @@ pub fn min_align_of_val(val: &T) -> usize { unsafe { intrinsics::min_align_of_val(val) } } -/// Returns the alignment in memory for a type. +/// Returns the [ABI]-required minimum alignment of a type. +/// +/// Every valid address of a value of the type `T` must be a multiple of this number. /// /// This is the alignment used for struct fields. It may be smaller than the preferred alignment. /// +/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface +/// /// # Examples /// /// ``` @@ -202,7 +278,11 @@ pub fn align_of() -> usize { unsafe { intrinsics::min_align_of::() } } -/// Returns the ABI-required minimum alignment of the type of the value that `val` points to +/// Returns the [ABI]-required minimum alignment of the type of the value that `val` points to. +/// +/// Every valid address of a value of the type `T` must be a multiple of this number. +/// +/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface /// /// # Examples /// @@ -217,16 +297,23 @@ pub fn align_of_val(val: &T) -> usize { unsafe { intrinsics::min_align_of_val(val) } } -/// Creates a value initialized to zero. +/// Creates a value whose bytes are all zero. 
/// -/// This function is similar to allocating space for a local variable and zeroing it out (an unsafe -/// operation). +/// This has the same effect as allocating space with +/// [`mem::uninitialized`][uninit] and then zeroing it out. It is useful for +/// [FFI] sometimes, but should generally be avoided. /// -/// Care must be taken when using this function, if the type `T` has a destructor and the value -/// falls out of scope (due to unwinding or returning) before being initialized, then the -/// destructor will run on zeroed data, likely leading to crashes. +/// There is no guarantee that an all-zero byte-pattern represents a valid value of +/// some type `T`. If `T` has a destructor and the value is destroyed (due to +/// a panic or the end of a scope) before being initialized, then the destructor +/// will run on zeroed data, likely leading to [undefined behavior][ub]. /// -/// This is useful for FFI functions sometimes, but should generally be avoided. +/// See also the documentation for [`mem::uninitialized`][uninit], which has +/// many of the same caveats. +/// +/// [uninit]: fn.uninitialized.html +/// [FFI]: ../../book/ffi.html +/// [ub]: ../../reference.html#behavior-considered-undefined /// /// # Examples /// @@ -234,6 +321,7 @@ pub fn align_of_val(val: &T) -> usize { /// use std::mem; /// /// let x: i32 = unsafe { mem::zeroed() }; +/// assert_eq!(0, x); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] @@ -241,54 +329,39 @@ pub unsafe fn zeroed() -> T { intrinsics::init() } -/// Creates a value initialized to an unspecified series of bytes. -/// -/// The byte sequence usually indicates that the value at the memory -/// in question has been dropped. Thus, *if* T carries a drop flag, -/// any associated destructor will not be run when the value falls out -/// of scope. -/// -/// Some code at one time used the `zeroed` function above to -/// accomplish this goal. -/// -/// This function is expected to be deprecated with the transition -/// to non-zeroing drop. -#[inline] -#[unstable(feature = "filling_drop", issue = "5016")] -pub unsafe fn dropped() -> T { - #[inline(always)] - unsafe fn dropped_impl() -> T { intrinsics::init_dropped() } - - dropped_impl() -} - /// Bypasses Rust's normal memory-initialization checks by pretending to -/// produce a value of type T, while doing nothing at all. +/// produce a value of type `T`, while doing nothing at all. /// /// **This is incredibly dangerous, and should not be done lightly. Deeply /// consider initializing your memory with a default value instead.** /// -/// This is useful for FFI functions and initializing arrays sometimes, +/// This is useful for [FFI] functions and initializing arrays sometimes, /// but should generally be avoided. /// -/// # Undefined Behavior +/// [FFI]: ../../book/ffi.html +/// +/// # Undefined behavior /// -/// It is Undefined Behavior to read uninitialized memory. Even just an +/// It is [undefined behavior][ub] to read uninitialized memory, even just an /// uninitialized boolean. For instance, if you branch on the value of such -/// a boolean your program may take one, both, or neither of the branches. +/// a boolean, your program may take one, both, or neither of the branches. /// -/// Note that this often also includes *writing* to the uninitialized value. -/// Rust believes the value is initialized, and will therefore try to Drop -/// the uninitialized value and its fields if you try to overwrite the memory -/// in a normal manner. 
The only way to safely initialize an arbitrary -/// uninitialized value is with one of the `ptr` functions: `write`, `copy`, or -/// `copy_nonoverlapping`. This isn't necessary if `T` is a primitive -/// or otherwise only contains types that don't implement Drop. +/// Writing to the uninitialized value is similarly dangerous. Rust believes the +/// value is initialized, and will therefore try to [`Drop`][drop] the uninitialized +/// value and its fields if you try to overwrite it in a normal manner. The only way +/// to safely initialize an uninitialized value is with [`ptr::write`][write], +/// [`ptr::copy`][copy], or [`ptr::copy_nonoverlapping`][copy_no]. /// -/// If this value *does* need some kind of Drop, it must be initialized before +/// If the value does implement `Drop`, it must be initialized before /// it goes out of scope (and therefore would be dropped). Note that this /// includes a `panic` occurring and unwinding the stack suddenly. /// +/// [ub]: ../../reference.html#behavior-considered-undefined +/// [write]: ../ptr/fn.write.html +/// [copy]: ../intrinsics/fn.copy.html +/// [copy_no]: ../intrinsics/fn.copy_nonoverlapping.html +/// [drop]: ../ops/trait.Drop.html +/// /// # Examples /// /// Here's how to safely initialize an array of `Vec`s. @@ -331,8 +404,8 @@ pub unsafe fn dropped() -> T { /// println!("{:?}", &data[0]); /// ``` /// -/// This example emphasizes exactly how delicate and dangerous doing this is. -/// Note that the `vec!` macro *does* let you initialize every element with a +/// This example emphasizes exactly how delicate and dangerous using `mem::uninitialized` +/// can be. Note that the `vec!` macro *does* let you initialize every element with a /// value that is only `Clone`, so the following is semantically equivalent and /// vastly less dangerous, as long as you can live with an extra heap /// allocation: @@ -347,21 +420,20 @@ pub unsafe fn uninitialized() -> T { intrinsics::uninit() } -/// Swap the values at two mutable locations of the same type, without deinitializing or copying -/// either one. +/// Swaps the values at two mutable locations, without deinitializing either one. /// /// # Examples /// /// ``` /// use std::mem; /// -/// let x = &mut 5; -/// let y = &mut 42; +/// let mut x = 5; +/// let mut y = 42; /// -/// mem::swap(x, y); +/// mem::swap(&mut x, &mut y); /// -/// assert_eq!(42, *x); -/// assert_eq!(5, *y); +/// assert_eq!(42, x); +/// assert_eq!(5, y); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] @@ -383,10 +455,7 @@ pub fn swap(x: &mut T, y: &mut T) { } /// Replaces the value at a mutable location with a new one, returning the old value, without -/// deinitializing or copying either one. -/// -/// This is primarily used for transferring and swapping ownership of a value in a mutable -/// location. +/// deinitializing either one. /// /// # Examples /// @@ -395,15 +464,17 @@ pub fn swap(x: &mut T, y: &mut T) { /// ``` /// use std::mem; /// -/// let mut v: Vec = Vec::new(); +/// let mut v: Vec = vec![1, 2]; /// -/// mem::replace(&mut v, Vec::new()); +/// let old_v = mem::replace(&mut v, vec![3, 4, 5]); +/// assert_eq!(2, old_v.len()); +/// assert_eq!(3, v.len()); /// ``` /// -/// This function allows consumption of one field of a struct by replacing it with another value. -/// The normal approach doesn't always work: +/// `replace` allows consumption of a struct field by replacing it with another value. 
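Picking up the rule stated above that an uninitialized value may only be initialized through the `ptr` functions, a minimal sketch with `String` standing in for a type that implements `Drop`; this mirrors the documented pattern rather than adding anything new to it.

```rust
use std::mem;
use std::ptr;

fn main() {
    // `s` is not a valid String yet; a normal assignment would try to drop
    // the uninitialized value first, so write through a raw pointer instead.
    let mut s: String = unsafe { mem::uninitialized() };
    unsafe {
        ptr::write(&mut s, String::from("initialized"));
    }
    assert_eq!(s, "initialized");
    // From here on `s` is an ordinary value and is dropped normally.
}
```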
+/// Without `replace` you can run into issues like these: /// -/// ```rust,ignore +/// ```ignore /// struct Buffer { buf: Vec } /// /// impl Buffer { @@ -423,6 +494,7 @@ pub fn swap(x: &mut T, y: &mut T) { /// ``` /// # #![allow(dead_code)] /// use std::mem; +/// /// # struct Buffer { buf: Vec } /// impl Buffer { /// fn get_and_reset(&mut self) -> Vec { @@ -439,14 +511,25 @@ pub fn replace(dest: &mut T, mut src: T) -> T { /// Disposes of a value. /// -/// While this does call the argument's implementation of `Drop`, it will not -/// release any borrows, as borrows are based on lexical scope. +/// While this does call the argument's implementation of [`Drop`][drop], +/// it will not release any borrows, as borrows are based on lexical scope. /// /// This effectively does nothing for /// [types which implement `Copy`](../../book/ownership.html#copy-types), /// e.g. integers. Such values are copied and _then_ moved into the function, /// so the value persists after this function call. /// +/// This function is not magic; it is literally defined as +/// +/// ``` +/// pub fn drop(_x: T) { } +/// ``` +/// +/// Because `_x` is moved into the function, it is automatically dropped before +/// the function returns. +/// +/// [drop]: ../ops/trait.Drop.html +/// /// # Examples /// /// Basic usage: @@ -483,8 +566,8 @@ pub fn replace(dest: &mut T, mut src: T) -> T { /// v.push(4); // no problems /// ``` /// -/// Since `RefCell` enforces the borrow rules at runtime, `drop()` can -/// seemingly release a borrow of one: +/// Since `RefCell` enforces the borrow rules at runtime, `drop` can +/// release a `RefCell` borrow: /// /// ``` /// use std::cell::RefCell; @@ -500,7 +583,7 @@ pub fn replace(dest: &mut T, mut src: T) -> T { /// println!("{}", *borrow); /// ``` /// -/// Integers and other types implementing `Copy` are unaffected by `drop()` +/// Integers and other types implementing `Copy` are unaffected by `drop`. /// /// ``` /// #[derive(Copy, Clone)] @@ -518,69 +601,22 @@ pub fn replace(dest: &mut T, mut src: T) -> T { #[stable(feature = "rust1", since = "1.0.0")] pub fn drop(_x: T) { } -macro_rules! repeat_u8_as_u16 { - ($name:expr) => { (($name as u16) << 8 | - ($name as u16)) } -} -macro_rules! repeat_u8_as_u32 { - ($name:expr) => { (($name as u32) << 24 | - ($name as u32) << 16 | - ($name as u32) << 8 | - ($name as u32)) } -} -macro_rules! repeat_u8_as_u64 { - ($name:expr) => { ((repeat_u8_as_u32!($name) as u64) << 32 | - (repeat_u8_as_u32!($name) as u64)) } -} - -// NOTE: Keep synchronized with values used in librustc_trans::trans::adt. -// -// In particular, the POST_DROP_U8 marker must never equal the -// DTOR_NEEDED_U8 marker. -// -// For a while pnkfelix was using 0xc1 here. -// But having the sign bit set is a pain, so 0x1d is probably better. -// -// And of course, 0x00 brings back the old world of zero'ing on drop. 
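As a complement to the `drop` documentation above, a small sketch showing that passing a value to `drop` runs its destructor at the call site rather than at the end of the scope; the `Noisy` type is invented for the example.

```rust
struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    let _keep = Noisy("kept until the end of main");
    let early = Noisy("dropped early");
    // `early` is moved into `drop`, so its destructor runs right here.
    drop(early);
    println!("after the explicit drop");
    // `_keep` is dropped automatically when `main` returns.
}
```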
-#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_U8: u8 = 0x1d; -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_U16: u16 = repeat_u8_as_u16!(POST_DROP_U8); -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_U32: u32 = repeat_u8_as_u32!(POST_DROP_U8); -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_U64: u64 = repeat_u8_as_u64!(POST_DROP_U8); - -#[cfg(target_pointer_width = "16")] -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_USIZE: usize = POST_DROP_U16 as usize; -#[cfg(target_pointer_width = "32")] -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_USIZE: usize = POST_DROP_U32 as usize; -#[cfg(target_pointer_width = "64")] -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_USIZE: usize = POST_DROP_U64 as usize; - -/// Interprets `src` as `&U`, and then reads `src` without moving the contained -/// value. +/// Interprets `src` as having type `&U`, and then reads `src` without moving +/// the contained value. /// /// This function will unsafely assume the pointer `src` is valid for -/// `sizeof(U)` bytes by transmuting `&T` to `&U` and then reading the `&U`. It -/// will also unsafely create a copy of the contained value instead of moving -/// out of `src`. +/// [`size_of::()`][size_of] bytes by transmuting `&T` to `&U` and then reading +/// the `&U`. It will also unsafely create a copy of the contained value instead of +/// moving out of `src`. /// /// It is not a compile-time error if `T` and `U` have different sizes, but it /// is highly encouraged to only invoke this function where `T` and `U` have the -/// same size. This function triggers undefined behavior if `U` is larger than +/// same size. This function triggers [undefined behavior][ub] if `U` is larger than /// `T`. /// +/// [ub]: ../../reference.html#behavior-considered-undefined +/// [size_of]: fn.size_of.html +/// /// # Examples /// /// ``` diff --git a/src/libcore/nonzero.rs b/src/libcore/nonzero.rs index 92bbc4efb7..47afaf7735 100644 --- a/src/libcore/nonzero.rs +++ b/src/libcore/nonzero.rs @@ -13,7 +13,6 @@ reason = "needs an RFC to flesh out the design", issue = "27730")] -use marker::Sized; use ops::{CoerceUnsized, Deref}; /// Unsafe trait to indicate what types are usable with the NonZero struct diff --git a/src/libcore/num/bignum.rs b/src/libcore/num/bignum.rs index a881b539ce..1ca550c674 100644 --- a/src/libcore/num/bignum.rs +++ b/src/libcore/num/bignum.rs @@ -27,8 +27,6 @@ issue = "0")] #![macro_use] -use prelude::v1::*; - use mem; use intrinsics; @@ -476,9 +474,9 @@ macro_rules! define_bignum { let sz = if self.size < 1 {1} else {self.size}; let digitlen = mem::size_of::<$ty>() * 2; - try!(write!(f, "{:#x}", self.base[sz-1])); + write!(f, "{:#x}", self.base[sz-1])?; for &v in self.base[..sz-1].iter().rev() { - try!(write!(f, "_{:01$x}", v, digitlen)); + write!(f, "_{:01$x}", v, digitlen)?; } ::result::Result::Ok(()) } @@ -494,6 +492,5 @@ define_bignum!(Big32x40: type=Digit32, n=40); // this one is used for testing only. 
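A minimal sketch of the `transmute_copy` behaviour described above; the `Foo` struct is made up for the example, and note that the original value is still usable afterwards because only a bitwise copy is made.

```rust
use std::mem;

struct Foo {
    bar: u8,
}

fn main() {
    let bytes: [u8; 1] = [42];
    // Reinterpret the one-byte array as a `Foo` without moving out of it.
    let foo: Foo = unsafe { mem::transmute_copy(&bytes) };
    assert_eq!(foo.bar, 42);
    // `bytes` is untouched; `transmute_copy` only read from it.
    assert_eq!(bytes[0], 42);
}
```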
#[doc(hidden)] pub mod tests { - use prelude::v1::*; define_bignum!(Big8x3: type=u8, n=3); } diff --git a/src/libcore/num/dec2flt/algorithm.rs b/src/libcore/num/dec2flt/algorithm.rs index 4761727cec..604bc7c10d 100644 --- a/src/libcore/num/dec2flt/algorithm.rs +++ b/src/libcore/num/dec2flt/algorithm.rs @@ -10,7 +10,6 @@ //! The various algorithms from the paper. -use prelude::v1::*; use cmp::min; use cmp::Ordering::{Less, Equal, Greater}; use num::diy_float::Fp; @@ -47,7 +46,6 @@ mod fpu_precision { #[cfg(all(target_arch="x86", not(target_feature="sse2")))] mod fpu_precision { use mem::size_of; - use ops::Drop; /// A structure used to preserve the original value of the FPU control word, so that it can be /// restored when the structure is dropped. diff --git a/src/libcore/num/dec2flt/mod.rs b/src/libcore/num/dec2flt/mod.rs index ff2d85307b..eee3e9250f 100644 --- a/src/libcore/num/dec2flt/mod.rs +++ b/src/libcore/num/dec2flt/mod.rs @@ -92,7 +92,6 @@ reason = "internal routines only exposed for testing", issue = "0")] -use prelude::v1::*; use fmt; use str::FromStr; @@ -156,13 +155,13 @@ from_str_float_impl!(f64); /// [`FromStr`]: ../str/trait.FromStr.html /// [`f32`]: ../../std/primitive.f32.html /// [`f64`]: ../../std/primitive.f64.html -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] #[stable(feature = "rust1", since = "1.0.0")] pub struct ParseFloatError { kind: FloatErrorKind } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] enum FloatErrorKind { Empty, Invalid, diff --git a/src/libcore/num/dec2flt/num.rs b/src/libcore/num/dec2flt/num.rs index 81e7856633..34b41fa9de 100644 --- a/src/libcore/num/dec2flt/num.rs +++ b/src/libcore/num/dec2flt/num.rs @@ -12,7 +12,6 @@ // FIXME This module's name is a bit unfortunate, since other modules also import `core::num`. -use prelude::v1::*; use cmp::Ordering::{self, Less, Equal, Greater}; pub use num::bignum::Big32x40 as Big; diff --git a/src/libcore/num/dec2flt/parse.rs b/src/libcore/num/dec2flt/parse.rs index fce1c250a0..d20986faa0 100644 --- a/src/libcore/num/dec2flt/parse.rs +++ b/src/libcore/num/dec2flt/parse.rs @@ -20,7 +20,6 @@ //! modules rely on to not panic (or overflow) in turn. //! To make matters worse, all that happens in a single pass over the input. //! So, be careful when modifying anything, and double-check with the other modules. -use prelude::v1::*; use super::num; use self::ParseResult::{Valid, ShortcutToInf, ShortcutToZero, Invalid}; diff --git a/src/libcore/num/dec2flt/rawfp.rs b/src/libcore/num/dec2flt/rawfp.rs index 68e4dc4b35..e3b58b6cc7 100644 --- a/src/libcore/num/dec2flt/rawfp.rs +++ b/src/libcore/num/dec2flt/rawfp.rs @@ -27,7 +27,6 @@ //! Many functions in this module only handle normal numbers. The dec2flt routines conservatively //! take the universally-correct slow path (Algorithm M) for very small and very large numbers. //! That algorithm needs only next_float() which does handle subnormals and zeros. -use prelude::v1::*; use u32; use cmp::Ordering::{Less, Equal, Greater}; use ops::{Mul, Div, Neg}; diff --git a/src/libcore/num/flt2dec/decoder.rs b/src/libcore/num/flt2dec/decoder.rs index 5420e7bdd2..72529d3da0 100644 --- a/src/libcore/num/flt2dec/decoder.rs +++ b/src/libcore/num/flt2dec/decoder.rs @@ -10,8 +10,6 @@ //! Decodes a floating-point value into individual parts and error ranges. 
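Since the hunks above touch the dec2flt plumbing behind `FromStr` for floats and add `Eq` to `ParseFloatError`, here is a small sketch of the user-visible surface; the input strings are arbitrary.

```rust
fn main() {
    // `str::parse` for floats is backed by the dec2flt code touched above.
    let pi: f64 = "3.14".parse().unwrap();
    assert_eq!(pi, 3.14);

    // Failed parses return a `ParseFloatError`; errors of the same kind
    // compare equal, and with this change the type also derives `Eq`.
    let a = "not a float".parse::<f64>();
    let b = "also not a float".parse::<f64>();
    assert!(a.is_err());
    assert_eq!(a, b);
}
```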
-use prelude::v1::*; - use {f32, f64}; use num::FpCategory; use num::dec2flt::rawfp::RawFloat; @@ -23,7 +21,7 @@ use num::dec2flt::rawfp::RawFloat; /// - Any number from `(mant - minus) * 2^exp` to `(mant + plus) * 2^exp` will /// round to the original value. The range is inclusive only when /// `inclusive` is true. -#[derive(Copy, Clone, Debug, PartialEq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct Decoded { /// The scaled mantissa. pub mant: u64, @@ -40,7 +38,7 @@ pub struct Decoded { } /// Decoded unsigned value. -#[derive(Copy, Clone, Debug, PartialEq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum FullDecoded { /// Not-a-number. Nan, diff --git a/src/libcore/num/flt2dec/mod.rs b/src/libcore/num/flt2dec/mod.rs index b549f33424..f6c03a59f8 100644 --- a/src/libcore/num/flt2dec/mod.rs +++ b/src/libcore/num/flt2dec/mod.rs @@ -130,7 +130,6 @@ functions. reason = "internal routines only exposed for testing", issue = "0")] -use prelude::v1::*; use i16; pub use self::decoder::{decode, DecodableFloat, FullDecoded, Decoded}; diff --git a/src/libcore/num/flt2dec/strategy/dragon.rs b/src/libcore/num/flt2dec/strategy/dragon.rs index 2d68c3a6d0..6aa4f297e7 100644 --- a/src/libcore/num/flt2dec/strategy/dragon.rs +++ b/src/libcore/num/flt2dec/strategy/dragon.rs @@ -15,8 +15,6 @@ Almost direct (but slightly optimized) Rust translation of Figure 3 of [1]. quickly and accurately. SIGPLAN Not. 31, 5 (May. 1996), 108-116. */ -use prelude::v1::*; - use cmp::Ordering; use num::flt2dec::{Decoded, MAX_SIG_DIGITS, round_up}; diff --git a/src/libcore/num/flt2dec/strategy/grisu.rs b/src/libcore/num/flt2dec/strategy/grisu.rs index 13e01d9a7f..cf70a1978f 100644 --- a/src/libcore/num/flt2dec/strategy/grisu.rs +++ b/src/libcore/num/flt2dec/strategy/grisu.rs @@ -16,8 +16,6 @@ Rust adaptation of Grisu3 algorithm described in [1]. It uses about accurately with integers. SIGPLAN Not. 45, 6 (June 2010), 233-243. */ -use prelude::v1::*; - use num::diy_float::Fp; use num::flt2dec::{Decoded, MAX_SIG_DIGITS, round_up}; diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs index 4636811aa4..386daa0846 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -12,17 +12,11 @@ #![stable(feature = "rust1", since = "1.0.0")] -use char::CharExt; -use cmp::PartialOrd; -use convert::{From, TryFrom}; +use convert::TryFrom; use fmt; use intrinsics; -use marker::{Copy, Sized}; use mem::size_of; -use option::Option::{self, Some, None}; -use result::Result::{self, Ok, Err}; -use str::{FromStr, StrExt}; -use slice::SliceExt; +use str::FromStr; /// Provides intentionally-wrapped arithmetic on `T`. /// @@ -619,14 +613,12 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// # #![feature(no_panic_abs)] - /// /// use std::i32; /// /// assert_eq!((-5i32).checked_abs(), Some(5)); /// assert_eq!(i32::MIN.checked_abs(), None); /// ``` - #[unstable(feature = "no_panic_abs", issue = "35057")] + #[stable(feature = "no_panic_abs", since = "1.13.0")] #[inline] pub fn checked_abs(self) -> Option { if self.is_negative() { @@ -901,14 +893,12 @@ macro_rules! 
int_impl { /// Basic usage: /// /// ``` - /// # #![feature(no_panic_abs)] - /// /// assert_eq!(100i8.wrapping_abs(), 100); /// assert_eq!((-100i8).wrapping_abs(), 100); /// assert_eq!((-128i8).wrapping_abs(), -128); /// assert_eq!((-128i8).wrapping_abs() as u8, 128); /// ``` - #[unstable(feature = "no_panic_abs", issue = "35057")] + #[stable(feature = "no_panic_abs", since = "1.13.0")] #[inline(always)] pub fn wrapping_abs(self) -> Self { if self.is_negative() { @@ -1139,13 +1129,11 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// # #![feature(no_panic_abs)] - /// /// assert_eq!(10i8.overflowing_abs(), (10,false)); /// assert_eq!((-10i8).overflowing_abs(), (10,false)); /// assert_eq!((-128i8).overflowing_abs(), (-128,true)); /// ``` - #[unstable(feature = "no_panic_abs", issue = "35057")] + #[stable(feature = "no_panic_abs", since = "1.13.0")] #[inline] pub fn overflowing_abs(self) -> (Self, bool) { if self.is_negative() { @@ -2217,25 +2205,21 @@ macro_rules! uint_impl { let mut base = self; let mut acc = 1; - let mut prev_base = self; - let mut base_oflo = false; - while exp > 0 { + while exp > 1 { if (exp & 1) == 1 { - if base_oflo { - // ensure overflow occurs in the same manner it - // would have otherwise (i.e. signal any exception - // it would have otherwise). - acc = acc * (prev_base * prev_base); - } else { - acc = acc * base; - } + acc = acc * base; } - prev_base = base; - let (new_base, new_base_oflo) = base.overflowing_mul(base); - base = new_base; - base_oflo = new_base_oflo; exp /= 2; + base = base * base; + } + + // Deal with the final bit of the exponent separately, since + // squaring the base afterwards is not necessary and may cause a + // needless overflow. + if exp == 1 { + acc = acc * base; } + acc } @@ -2411,7 +2395,7 @@ impl usize { /// assert_eq!(nan.classify(), FpCategory::Nan); /// assert_eq!(sub.classify(), FpCategory::Subnormal); /// ``` -#[derive(Copy, Clone, PartialEq, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub enum FpCategory { /// "Not a Number", often obtained by dividing by zero. @@ -2754,11 +2738,11 @@ fn from_str_radix(src: &str, radix: u32) /// on the primitive integer types, such as [`i8::from_str_radix()`]. /// /// [`i8::from_str_radix()`]: ../../std/primitive.i8.html#method.from_str_radix -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] #[stable(feature = "rust1", since = "1.0.0")] pub struct ParseIntError { kind: IntErrorKind } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] enum IntErrorKind { Empty, InvalidDigit, diff --git a/src/libcore/ops.rs b/src/libcore/ops.rs index 9347ac2a8c..85a52da332 100644 --- a/src/libcore/ops.rs +++ b/src/libcore/ops.rs @@ -10,11 +10,21 @@ //! Overloadable operators. //! -//! Implementing these traits allows you to get an effect similar to -//! overloading operators. +//! Implementing these traits allows you to overload certain operators. //! //! Some of these traits are imported by the prelude, so they are available in -//! every Rust program. +//! every Rust program. Only operators backed by traits can be overloaded. For +//! example, the addition operator (`+`) can be overloaded through the `Add` +//! trait, but since the assignment operator (`=`) has no backing trait, there +//! is no way of overloading its semantics. Additionally, this module does not +//! provide any mechanism to create new operators. If traitless overloading or +//! 
custom operators are required, you should look toward macros or compiler +//! plugins to extend Rust's syntax. +//! +//! Note that the `&&` and `||` operators short-circuit, i.e. they only +//! evaluate their second operand if it contributes to the result. Since this +//! behavior is not enforceable by traits, `&&` and `||` are not supported as +//! overloadable operators. //! //! Many of the operators take their operands by value. In non-generic //! contexts involving built-in types, this is usually not a problem. @@ -62,14 +72,79 @@ //! } //! ``` //! -//! See the documentation for each trait for a minimum implementation that -//! prints something to the screen. +//! See the documentation for each trait for an example implementation. +//! +//! The [`Fn`], [`FnMut`], and [`FnOnce`] traits are implemented by types that can be +//! invoked like functions. Note that `Fn` takes `&self`, `FnMut` takes `&mut +//! self` and `FnOnce` takes `self`. These correspond to the three kinds of +//! methods that can be invoked on an instance: call-by-reference, +//! call-by-mutable-reference, and call-by-value. The most common use of these +//! traits is to act as bounds to higher-level functions that take functions or +//! closures as arguments. +//! +//! [`Fn`]: trait.Fn.html +//! [`FnMut`]: trait.FnMut.html +//! [`FnOnce`]: trait.FnOnce.html +//! +//! Taking a `Fn` as a parameter: +//! +//! ```rust +//! fn call_with_one(func: F) -> usize +//! where F: Fn(usize) -> usize +//! { +//! func(1) +//! } +//! +//! let double = |x| x * 2; +//! assert_eq!(call_with_one(double), 2); +//! ``` +//! +//! Taking a `FnMut` as a parameter: +//! +//! ```rust +//! fn do_twice(mut func: F) +//! where F: FnMut() +//! { +//! func(); +//! func(); +//! } +//! +//! let mut x: usize = 1; +//! { +//! let add_two_to_x = || x += 2; +//! do_twice(add_two_to_x); +//! } +//! +//! assert_eq!(x, 5); +//! ``` +//! +//! Taking a `FnOnce` as a parameter: +//! +//! ```rust +//! fn consume_with_relish(func: F) +//! where F: FnOnce() -> String +//! { +//! // `func` consumes its captured variables, so it cannot be run more +//! // than once +//! println!("Consumed: {}", func()); +//! +//! println!("Delicious!"); +//! +//! // Attempting to invoke `func()` again will throw a `use of moved +//! // value` error for `func` +//! } +//! +//! let x = String::from("x"); +//! let consume_and_return_x = move || x; +//! consume_with_relish(consume_and_return_x); +//! +//! // `consume_and_return_x` can no longer be invoked at this point +//! ``` #![stable(feature = "rust1", since = "1.0.0")] -use cmp::PartialOrd; use fmt; -use marker::{Sized, Unsize}; +use marker::Unsize; /// The `Drop` trait is used to run some code when a value goes out of scope. /// This is sometimes called a 'destructor'. @@ -102,6 +177,13 @@ pub trait Drop { /// /// After this function is over, the memory of `self` will be deallocated. /// + /// This function cannot be called explicitly. This is compiler error + /// [0040]. However, the [`std::mem::drop`] function in the prelude can be + /// used to call the argument's `Drop` implementation. + /// + /// [0040]: https://doc.rust-lang.org/error-index.html#E0040 + /// [`std::mem::drop`]: https://doc.rust-lang.org/std/mem/fn.drop.html + /// /// # Panics /// /// Given that a `panic!` will call `drop()` as it unwinds, any `panic!` in @@ -166,27 +248,46 @@ macro_rules! forward_ref_binop { /// /// # Examples /// -/// A trivial implementation of `Add`. 
When `Foo + Foo` happens, it ends up -/// calling `add`, and therefore, `main` prints `Adding!`. +/// This example creates a `Point` struct that implements the `Add` trait, and +/// then demonstrates adding two `Point`s. /// /// ``` /// use std::ops::Add; /// -/// struct Foo; +/// #[derive(Debug)] +/// struct Point { +/// x: i32, +/// y: i32, +/// } /// -/// impl Add for Foo { -/// type Output = Foo; +/// impl Add for Point { +/// type Output = Point; /// -/// fn add(self, _rhs: Foo) -> Foo { -/// println!("Adding!"); -/// self +/// fn add(self, other: Point) -> Point { +/// Point { +/// x: self.x + other.x, +/// y: self.y + other.y, +/// } +/// } +/// } +/// +/// impl PartialEq for Point { +/// fn eq(&self, other: &Self) -> bool { +/// self.x == other.x && self.y == other.y /// } /// } /// /// fn main() { -/// Foo + Foo; +/// assert_eq!(Point { x: 1, y: 0 } + Point { x: 2, y: 3 }, +/// Point { x: 3, y: 3 }); /// } /// ``` +/// +/// Note that `RHS = Self` by default, but this is not mandatory. For example, +/// [std::time::SystemTime] implements `Add`, which permits +/// operations of the form `SystemTime = SystemTime + Duration`. +/// +/// [std::time::SystemTime]: ../../std/time/struct.SystemTime.html #[lang = "add"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Add { @@ -220,27 +321,46 @@ add_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// /// # Examples /// -/// A trivial implementation of `Sub`. When `Foo - Foo` happens, it ends up -/// calling `sub`, and therefore, `main` prints `Subtracting!`. +/// This example creates a `Point` struct that implements the `Sub` trait, and +/// then demonstrates subtracting two `Point`s. /// /// ``` /// use std::ops::Sub; /// -/// struct Foo; +/// #[derive(Debug)] +/// struct Point { +/// x: i32, +/// y: i32, +/// } /// -/// impl Sub for Foo { -/// type Output = Foo; +/// impl Sub for Point { +/// type Output = Point; /// -/// fn sub(self, _rhs: Foo) -> Foo { -/// println!("Subtracting!"); -/// self +/// fn sub(self, other: Point) -> Point { +/// Point { +/// x: self.x - other.x, +/// y: self.y - other.y, +/// } +/// } +/// } +/// +/// impl PartialEq for Point { +/// fn eq(&self, other: &Self) -> bool { +/// self.x == other.x && self.y == other.y /// } /// } /// /// fn main() { -/// Foo - Foo; +/// assert_eq!(Point { x: 3, y: 3 } - Point { x: 2, y: 3 }, +/// Point { x: 1, y: 0 }); /// } /// ``` +/// +/// Note that `RHS = Self` by default, but this is not mandatory. For example, +/// [std::time::SystemTime] implements `Sub`, which permits +/// operations of the form `SystemTime = SystemTime - Duration`. +/// +/// [std::time::SystemTime]: ../../std/time/struct.SystemTime.html #[lang = "sub"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Sub { @@ -274,26 +394,94 @@ sub_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// /// # Examples /// -/// A trivial implementation of `Mul`. When `Foo * Foo` happens, it ends up -/// calling `mul`, and therefore, `main` prints `Multiplying!`. +/// Implementing a `Mul`tipliable rational number struct: /// /// ``` /// use std::ops::Mul; /// -/// struct Foo; +/// // The uniqueness of rational numbers in lowest terms is a consequence of +/// // the fundamental theorem of arithmetic. 
+/// #[derive(Eq)] +/// #[derive(PartialEq, Debug)] +/// struct Rational { +/// nominator: usize, +/// denominator: usize, +/// } +/// +/// impl Rational { +/// fn new(nominator: usize, denominator: usize) -> Self { +/// if denominator == 0 { +/// panic!("Zero is an invalid denominator!"); +/// } +/// +/// // Reduce to lowest terms by dividing by the greatest common +/// // divisor. +/// let gcd = gcd(nominator, denominator); +/// Rational { +/// nominator: nominator / gcd, +/// denominator: denominator / gcd, +/// } +/// } +/// } /// -/// impl Mul for Foo { -/// type Output = Foo; +/// impl Mul for Rational { +/// // The multiplication of rational numbers is a closed operation. +/// type Output = Self; /// -/// fn mul(self, _rhs: Foo) -> Foo { -/// println!("Multiplying!"); -/// self +/// fn mul(self, rhs: Self) -> Self { +/// let nominator = self.nominator * rhs.nominator; +/// let denominator = self.denominator * rhs.denominator; +/// Rational::new(nominator, denominator) /// } /// } /// -/// fn main() { -/// Foo * Foo; +/// // Euclid's two-thousand-year-old algorithm for finding the greatest common +/// // divisor. +/// fn gcd(x: usize, y: usize) -> usize { +/// let mut x = x; +/// let mut y = y; +/// while y != 0 { +/// let t = y; +/// y = x % y; +/// x = t; +/// } +/// x +/// } +/// +/// assert_eq!(Rational::new(1, 2), Rational::new(2, 4)); +/// assert_eq!(Rational::new(2, 3) * Rational::new(3, 4), +/// Rational::new(1, 2)); +/// ``` +/// +/// Note that `RHS = Self` by default, but this is not mandatory. Here is an +/// implementation which enables multiplication of vectors by scalars, as is +/// done in linear algebra. +/// +/// ``` +/// use std::ops::Mul; +/// +/// struct Scalar {value: usize}; +/// +/// #[derive(Debug)] +/// struct Vector {value: Vec}; +/// +/// impl Mul for Scalar { +/// type Output = Vector; +/// +/// fn mul(self, rhs: Vector) -> Vector { +/// Vector {value: rhs.value.iter().map(|v| self.value * v).collect()} +/// } /// } +/// +/// impl PartialEq for Vector { +/// fn eq(&self, other: &Self) -> bool { +/// self.value == other.value +/// } +/// } +/// +/// let scalar = Scalar{value: 3}; +/// let vector = Vector{value: vec![2, 4, 6]}; +/// assert_eq!(scalar * vector, Vector{value: vec![6, 12, 18]}); /// ``` #[lang = "mul"] #[stable(feature = "rust1", since = "1.0.0")] @@ -328,27 +516,101 @@ mul_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// /// # Examples /// -/// A trivial implementation of `Div`. When `Foo / Foo` happens, it ends up -/// calling `div`, and therefore, `main` prints `Dividing!`. +/// Implementing a `Div`idable rational number struct: /// /// ``` /// use std::ops::Div; /// -/// struct Foo; +/// // The uniqueness of rational numbers in lowest terms is a consequence of +/// // the fundamental theorem of arithmetic. +/// #[derive(Eq)] +/// #[derive(PartialEq, Debug)] +/// struct Rational { +/// nominator: usize, +/// denominator: usize, +/// } +/// +/// impl Rational { +/// fn new(nominator: usize, denominator: usize) -> Self { +/// if denominator == 0 { +/// panic!("Zero is an invalid denominator!"); +/// } +/// +/// // Reduce to lowest terms by dividing by the greatest common +/// // divisor. +/// let gcd = gcd(nominator, denominator); +/// Rational { +/// nominator: nominator / gcd, +/// denominator: denominator / gcd, +/// } +/// } +/// } /// -/// impl Div for Foo { -/// type Output = Foo; +/// impl Div for Rational { +/// // The division of rational numbers is a closed operation. 
+/// type Output = Self; /// -/// fn div(self, _rhs: Foo) -> Foo { -/// println!("Dividing!"); -/// self +/// fn div(self, rhs: Self) -> Self { +/// if rhs.nominator == 0 { +/// panic!("Cannot divide by zero-valued `Rational`!"); +/// } +/// +/// let nominator = self.nominator * rhs.denominator; +/// let denominator = self.denominator * rhs.nominator; +/// Rational::new(nominator, denominator) /// } /// } /// +/// // Euclid's two-thousand-year-old algorithm for finding the greatest common +/// // divisor. +/// fn gcd(x: usize, y: usize) -> usize { +/// let mut x = x; +/// let mut y = y; +/// while y != 0 { +/// let t = y; +/// y = x % y; +/// x = t; +/// } +/// x +/// } +/// /// fn main() { -/// Foo / Foo; +/// assert_eq!(Rational::new(1, 2), Rational::new(2, 4)); +/// assert_eq!(Rational::new(1, 2) / Rational::new(3, 4), +/// Rational::new(2, 3)); /// } /// ``` +/// +/// Note that `RHS = Self` by default, but this is not mandatory. Here is an +/// implementation which enables division of vectors by scalars, as is done in +/// linear algebra. +/// +/// ``` +/// use std::ops::Div; +/// +/// struct Scalar {value: f32}; +/// +/// #[derive(Debug)] +/// struct Vector {value: Vec}; +/// +/// impl Div for Vector { +/// type Output = Vector; +/// +/// fn div(self, rhs: Scalar) -> Vector { +/// Vector {value: self.value.iter().map(|v| v / rhs.value).collect()} +/// } +/// } +/// +/// impl PartialEq for Vector { +/// fn eq(&self, other: &Self) -> bool { +/// self.value == other.value +/// } +/// } +/// +/// let scalar = Scalar{value: 2f32}; +/// let vector = Vector{value: vec![2f32, 4f32, 6f32]}; +/// assert_eq!(vector / scalar, Vector{value: vec![1f32, 2f32, 3f32]}); +/// ``` #[lang = "div"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Div { @@ -399,26 +661,34 @@ div_impl_float! { f32 f64 } /// /// # Examples /// -/// A trivial implementation of `Rem`. When `Foo % Foo` happens, it ends up -/// calling `rem`, and therefore, `main` prints `Remainder-ing!`. +/// This example implements `Rem` on a `SplitSlice` object. After `Rem` is +/// implemented, one can use the `%` operator to find out what the remaining +/// elements of the slice would be after splitting it into equal slices of a +/// given length. /// /// ``` /// use std::ops::Rem; /// -/// struct Foo; +/// #[derive(PartialEq, Debug)] +/// struct SplitSlice<'a, T: 'a> { +/// slice: &'a [T], +/// } /// -/// impl Rem for Foo { -/// type Output = Foo; +/// impl<'a, T> Rem for SplitSlice<'a, T> { +/// type Output = SplitSlice<'a, T>; /// -/// fn rem(self, _rhs: Foo) -> Foo { -/// println!("Remainder-ing!"); -/// self +/// fn rem(self, modulus: usize) -> Self { +/// let len = self.slice.len(); +/// let rem = len % modulus; +/// let start = len - rem; +/// SplitSlice {slice: &self.slice[start..]} /// } /// } /// -/// fn main() { -/// Foo % Foo; -/// } +/// // If we were to divide &[0, 1, 2, 3, 4, 5, 6, 7] into slices of size 3, +/// // the remainder would be &[6, 7] +/// assert_eq!(SplitSlice { slice: &[0, 1, 2, 3, 4, 5, 6, 7] } % 3, +/// SplitSlice { slice: &[6, 7] }); /// ``` #[lang = "rem"] #[stable(feature = "rust1", since = "1.0.0")] @@ -470,26 +740,37 @@ rem_impl_float! { f32 f64 } /// /// # Examples /// -/// A trivial implementation of `Neg`. When `-Foo` happens, it ends up calling -/// `neg`, and therefore, `main` prints `Negating!`. +/// An implementation of `Neg` for `Sign`, which allows the use of `-` to +/// negate its value. 
/// /// ``` /// use std::ops::Neg; /// -/// struct Foo; +/// #[derive(Debug, PartialEq)] +/// enum Sign { +/// Negative, +/// Zero, +/// Positive, +/// } /// -/// impl Neg for Foo { -/// type Output = Foo; +/// impl Neg for Sign { +/// type Output = Sign; /// -/// fn neg(self) -> Foo { -/// println!("Negating!"); -/// self +/// fn neg(self) -> Sign { +/// match self { +/// Sign::Negative => Sign::Positive, +/// Sign::Zero => Sign::Zero, +/// Sign::Positive => Sign::Negative, +/// } /// } /// } /// -/// fn main() { -/// -Foo; -/// } +/// // a negative positive is a negative +/// assert_eq!(-Sign::Positive, Sign::Negative); +/// // a double negative is a positive +/// assert_eq!(-Sign::Negative, Sign::Positive); +/// // zero is its own negation +/// assert_eq!(-Sign::Zero, Sign::Zero); /// ``` #[lang = "neg"] #[stable(feature = "rust1", since = "1.0.0")] @@ -538,26 +819,31 @@ neg_impl_numeric! { isize i8 i16 i32 i64 f32 f64 } /// /// # Examples /// -/// A trivial implementation of `Not`. When `!Foo` happens, it ends up calling -/// `not`, and therefore, `main` prints `Not-ing!`. +/// An implementation of `Not` for `Answer`, which enables the use of `!` to +/// invert its value. /// /// ``` /// use std::ops::Not; /// -/// struct Foo; +/// #[derive(Debug, PartialEq)] +/// enum Answer { +/// Yes, +/// No, +/// } /// -/// impl Not for Foo { -/// type Output = Foo; +/// impl Not for Answer { +/// type Output = Answer; /// -/// fn not(self) -> Foo { -/// println!("Not-ing!"); -/// self +/// fn not(self) -> Answer { +/// match self { +/// Answer::Yes => Answer::No, +/// Answer::No => Answer::Yes +/// } /// } /// } /// -/// fn main() { -/// !Foo; -/// } +/// assert_eq!(!Answer::Yes, Answer::No); +/// assert_eq!(!Answer::No, Answer::Yes); /// ``` #[lang = "not"] #[stable(feature = "rust1", since = "1.0.0")] @@ -591,25 +877,55 @@ not_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// /// # Examples /// -/// A trivial implementation of `BitAnd`. When `Foo & Foo` happens, it ends up -/// calling `bitand`, and therefore, `main` prints `Bitwise And-ing!`. +/// In this example, the `&` operator is lifted to a trivial `Scalar` type. /// /// ``` /// use std::ops::BitAnd; /// -/// struct Foo; +/// #[derive(Debug, PartialEq)] +/// struct Scalar(bool); /// -/// impl BitAnd for Foo { -/// type Output = Foo; +/// impl BitAnd for Scalar { +/// type Output = Self; /// -/// fn bitand(self, _rhs: Foo) -> Foo { -/// println!("Bitwise And-ing!"); -/// self +/// // rhs is the "right-hand side" of the expression `a & b` +/// fn bitand(self, rhs: Self) -> Self { +/// Scalar(self.0 & rhs.0) +/// } +/// } +/// +/// fn main() { +/// assert_eq!(Scalar(true) & Scalar(true), Scalar(true)); +/// assert_eq!(Scalar(true) & Scalar(false), Scalar(false)); +/// assert_eq!(Scalar(false) & Scalar(true), Scalar(false)); +/// assert_eq!(Scalar(false) & Scalar(false), Scalar(false)); +/// } +/// ``` +/// +/// In this example, the `BitAnd` trait is implemented for a `BooleanVector` +/// struct. 
+/// +/// ``` +/// use std::ops::BitAnd; +/// +/// #[derive(Debug, PartialEq)] +/// struct BooleanVector(Vec); +/// +/// impl BitAnd for BooleanVector { +/// type Output = Self; +/// +/// fn bitand(self, BooleanVector(rhs): Self) -> Self { +/// let BooleanVector(lhs) = self; +/// assert_eq!(lhs.len(), rhs.len()); +/// BooleanVector(lhs.iter().zip(rhs.iter()).map(|(x, y)| *x && *y).collect()) /// } /// } /// /// fn main() { -/// Foo & Foo; +/// let bv1 = BooleanVector(vec![true, true, false, false]); +/// let bv2 = BooleanVector(vec![true, false, true, false]); +/// let expected = BooleanVector(vec![true, false, false, false]); +/// assert_eq!(bv1 & bv2, expected); /// } /// ``` #[lang = "bitand"] @@ -644,25 +960,55 @@ bitand_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// /// # Examples /// -/// A trivial implementation of `BitOr`. When `Foo | Foo` happens, it ends up -/// calling `bitor`, and therefore, `main` prints `Bitwise Or-ing!`. +/// In this example, the `|` operator is lifted to a trivial `Scalar` type. /// /// ``` /// use std::ops::BitOr; /// -/// struct Foo; +/// #[derive(Debug, PartialEq)] +/// struct Scalar(bool); /// -/// impl BitOr for Foo { -/// type Output = Foo; +/// impl BitOr for Scalar { +/// type Output = Self; /// -/// fn bitor(self, _rhs: Foo) -> Foo { -/// println!("Bitwise Or-ing!"); -/// self +/// // rhs is the "right-hand side" of the expression `a | b` +/// fn bitor(self, rhs: Self) -> Self { +/// Scalar(self.0 | rhs.0) +/// } +/// } +/// +/// fn main() { +/// assert_eq!(Scalar(true) | Scalar(true), Scalar(true)); +/// assert_eq!(Scalar(true) | Scalar(false), Scalar(true)); +/// assert_eq!(Scalar(false) | Scalar(true), Scalar(true)); +/// assert_eq!(Scalar(false) | Scalar(false), Scalar(false)); +/// } +/// ``` +/// +/// In this example, the `BitOr` trait is implemented for a `BooleanVector` +/// struct. +/// +/// ``` +/// use std::ops::BitOr; +/// +/// #[derive(Debug, PartialEq)] +/// struct BooleanVector(Vec); +/// +/// impl BitOr for BooleanVector { +/// type Output = Self; +/// +/// fn bitor(self, BooleanVector(rhs): Self) -> Self { +/// let BooleanVector(lhs) = self; +/// assert_eq!(lhs.len(), rhs.len()); +/// BooleanVector(lhs.iter().zip(rhs.iter()).map(|(x, y)| *x || *y).collect()) /// } /// } /// /// fn main() { -/// Foo | Foo; +/// let bv1 = BooleanVector(vec![true, true, false, false]); +/// let bv2 = BooleanVector(vec![true, false, true, false]); +/// let expected = BooleanVector(vec![true, true, true, false]); +/// assert_eq!(bv1 | bv2, expected); /// } /// ``` #[lang = "bitor"] @@ -697,25 +1043,58 @@ bitor_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// /// # Examples /// -/// A trivial implementation of `BitXor`. When `Foo ^ Foo` happens, it ends up -/// calling `bitxor`, and therefore, `main` prints `Bitwise Xor-ing!`. +/// In this example, the `^` operator is lifted to a trivial `Scalar` type. 
/// /// ``` /// use std::ops::BitXor; /// -/// struct Foo; +/// #[derive(Debug, PartialEq)] +/// struct Scalar(bool); /// -/// impl BitXor for Foo { -/// type Output = Foo; +/// impl BitXor for Scalar { +/// type Output = Self; /// -/// fn bitxor(self, _rhs: Foo) -> Foo { -/// println!("Bitwise Xor-ing!"); -/// self +/// // rhs is the "right-hand side" of the expression `a ^ b` +/// fn bitxor(self, rhs: Self) -> Self { +/// Scalar(self.0 ^ rhs.0) /// } /// } /// /// fn main() { -/// Foo ^ Foo; +/// assert_eq!(Scalar(true) ^ Scalar(true), Scalar(false)); +/// assert_eq!(Scalar(true) ^ Scalar(false), Scalar(true)); +/// assert_eq!(Scalar(false) ^ Scalar(true), Scalar(true)); +/// assert_eq!(Scalar(false) ^ Scalar(false), Scalar(false)); +/// } +/// ``` +/// +/// In this example, the `BitXor` trait is implemented for a `BooleanVector` +/// struct. +/// +/// ``` +/// use std::ops::BitXor; +/// +/// #[derive(Debug, PartialEq)] +/// struct BooleanVector(Vec); +/// +/// impl BitXor for BooleanVector { +/// type Output = Self; +/// +/// fn bitxor(self, BooleanVector(rhs): Self) -> Self { +/// let BooleanVector(lhs) = self; +/// assert_eq!(lhs.len(), rhs.len()); +/// BooleanVector(lhs.iter() +/// .zip(rhs.iter()) +/// .map(|(x, y)| (*x || *y) && !(*x && *y)) +/// .collect()) +/// } +/// } +/// +/// fn main() { +/// let bv1 = BooleanVector(vec![true, true, false, false]); +/// let bv2 = BooleanVector(vec![true, false, true, false]); +/// let expected = BooleanVector(vec![false, true, true, false]); +/// assert_eq!(bv1 ^ bv2, expected); /// } /// ``` #[lang = "bitxor"] @@ -750,25 +1129,54 @@ bitxor_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// /// # Examples /// -/// A trivial implementation of `Shl`. When `Foo << Foo` happens, it ends up -/// calling `shl`, and therefore, `main` prints `Shifting left!`. +/// An implementation of `Shl` that lifts the `<<` operation on integers to a +/// `Scalar` struct. /// /// ``` /// use std::ops::Shl; /// -/// struct Foo; +/// #[derive(PartialEq, Debug)] +/// struct Scalar(usize); /// -/// impl Shl for Foo { -/// type Output = Foo; +/// impl Shl for Scalar { +/// type Output = Self; /// -/// fn shl(self, _rhs: Foo) -> Foo { -/// println!("Shifting left!"); -/// self +/// fn shl(self, Scalar(rhs): Self) -> Scalar { +/// let Scalar(lhs) = self; +/// Scalar(lhs << rhs) +/// } +/// } +/// fn main() { +/// assert_eq!(Scalar(4) << Scalar(2), Scalar(16)); +/// } +/// ``` +/// +/// An implementation of `Shl` that spins a vector leftward by a given amount. +/// +/// ``` +/// use std::ops::Shl; +/// +/// #[derive(PartialEq, Debug)] +/// struct SpinVector { +/// vec: Vec, +/// } +/// +/// impl Shl for SpinVector { +/// type Output = Self; +/// +/// fn shl(self, rhs: usize) -> SpinVector { +/// // rotate the vector by `rhs` places +/// let (a, b) = self.vec.split_at(rhs); +/// let mut spun_vector: Vec = vec![]; +/// spun_vector.extend_from_slice(b); +/// spun_vector.extend_from_slice(a); +/// SpinVector { vec: spun_vector } /// } /// } /// /// fn main() { -/// Foo << Foo; +/// assert_eq!(SpinVector { vec: vec![0, 1, 2, 3, 4] } << 2, +/// SpinVector { vec: vec![2, 3, 4, 0, 1] }); /// } /// ``` #[lang = "shl"] @@ -822,25 +1230,54 @@ shl_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize } /// /// # Examples /// -/// A trivial implementation of `Shr`. When `Foo >> Foo` happens, it ends up -/// calling `shr`, and therefore, `main` prints `Shifting right!`. +/// An implementation of `Shr` that lifts the `>>` operation on integers to a +/// `Scalar` struct. 
/// /// ``` /// use std::ops::Shr; /// -/// struct Foo; +/// #[derive(PartialEq, Debug)] +/// struct Scalar(usize); /// -/// impl Shr for Foo { -/// type Output = Foo; +/// impl Shr for Scalar { +/// type Output = Self; /// -/// fn shr(self, _rhs: Foo) -> Foo { -/// println!("Shifting right!"); -/// self +/// fn shr(self, Scalar(rhs): Self) -> Scalar { +/// let Scalar(lhs) = self; +/// Scalar(lhs >> rhs) +/// } +/// } +/// fn main() { +/// assert_eq!(Scalar(16) >> Scalar(2), Scalar(4)); +/// } +/// ``` +/// +/// An implementation of `Shr` that spins a vector rightward by a given amount. +/// +/// ``` +/// use std::ops::Shr; +/// +/// #[derive(PartialEq, Debug)] +/// struct SpinVector { +/// vec: Vec, +/// } +/// +/// impl Shr for SpinVector { +/// type Output = Self; +/// +/// fn shr(self, rhs: usize) -> SpinVector { +/// // rotate the vector by `rhs` places +/// let (a, b) = self.vec.split_at(self.vec.len() - rhs); +/// let mut spun_vector: Vec = vec![]; +/// spun_vector.extend_from_slice(b); +/// spun_vector.extend_from_slice(a); +/// SpinVector { vec: spun_vector } /// } /// } /// /// fn main() { -/// Foo >> Foo; +/// assert_eq!(SpinVector { vec: vec![0, 1, 2, 3, 4] } >> 2, +/// SpinVector { vec: vec![3, 4, 0, 1, 2] }); /// } /// ``` #[lang = "shr"] @@ -894,25 +1331,36 @@ shr_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize } /// /// # Examples /// -/// A trivial implementation of `AddAssign`. When `Foo += Foo` happens, it ends up -/// calling `add_assign`, and therefore, `main` prints `Adding!`. +/// This example creates a `Point` struct that implements the `AddAssign` +/// trait, and then demonstrates add-assigning to a mutable `Point`. /// /// ``` /// use std::ops::AddAssign; /// -/// struct Foo; +/// #[derive(Debug)] +/// struct Point { +/// x: i32, +/// y: i32, +/// } /// -/// impl AddAssign for Foo { -/// fn add_assign(&mut self, _rhs: Foo) { -/// println!("Adding!"); +/// impl AddAssign for Point { +/// fn add_assign(&mut self, other: Point) { +/// *self = Point { +/// x: self.x + other.x, +/// y: self.y + other.y, +/// }; /// } /// } /// -/// # #[allow(unused_assignments)] -/// fn main() { -/// let mut foo = Foo; -/// foo += Foo; +/// impl PartialEq for Point { +/// fn eq(&self, other: &Self) -> bool { +/// self.x == other.x && self.y == other.y +/// } /// } +/// +/// let mut point = Point { x: 1, y: 0 }; +/// point += Point { x: 2, y: 3 }; +/// assert_eq!(point, Point { x: 3, y: 3 }); /// ``` #[lang = "add_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] @@ -939,25 +1387,36 @@ add_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// /// # Examples /// -/// A trivial implementation of `SubAssign`. When `Foo -= Foo` happens, it ends up -/// calling `sub_assign`, and therefore, `main` prints `Subtracting!`. +/// This example creates a `Point` struct that implements the `SubAssign` +/// trait, and then demonstrates sub-assigning to a mutable `Point`. 
/// /// ``` /// use std::ops::SubAssign; /// -/// struct Foo; +/// #[derive(Debug)] +/// struct Point { +/// x: i32, +/// y: i32, +/// } /// -/// impl SubAssign for Foo { -/// fn sub_assign(&mut self, _rhs: Foo) { -/// println!("Subtracting!"); +/// impl SubAssign for Point { +/// fn sub_assign(&mut self, other: Point) { +/// *self = Point { +/// x: self.x - other.x, +/// y: self.y - other.y, +/// }; /// } /// } /// -/// # #[allow(unused_assignments)] -/// fn main() { -/// let mut foo = Foo; -/// foo -= Foo; +/// impl PartialEq for Point { +/// fn eq(&self, other: &Self) -> bool { +/// self.x == other.x && self.y == other.y +/// } /// } +/// +/// let mut point = Point { x: 3, y: 3 }; +/// point -= Point { x: 2, y: 3 }; +/// assert_eq!(point, Point {x: 1, y: 0}); /// ``` #[lang = "sub_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] @@ -1117,24 +1576,66 @@ rem_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// /// # Examples /// -/// A trivial implementation of `BitAndAssign`. When `Foo &= Foo` happens, it ends up -/// calling `bitand_assign`, and therefore, `main` prints `Bitwise And-ing!`. +/// In this example, the `&=` operator is lifted to a trivial `Scalar` type. /// /// ``` /// use std::ops::BitAndAssign; /// -/// struct Foo; +/// #[derive(Debug, PartialEq)] +/// struct Scalar(bool); /// -/// impl BitAndAssign for Foo { -/// fn bitand_assign(&mut self, _rhs: Foo) { -/// println!("Bitwise And-ing!"); +/// impl BitAndAssign for Scalar { +/// // rhs is the "right-hand side" of the expression `a &= b` +/// fn bitand_assign(&mut self, rhs: Self) { +/// *self = Scalar(self.0 & rhs.0) /// } /// } /// -/// # #[allow(unused_assignments)] /// fn main() { -/// let mut foo = Foo; -/// foo &= Foo; +/// let mut scalar = Scalar(true); +/// scalar &= Scalar(true); +/// assert_eq!(scalar, Scalar(true)); +/// +/// let mut scalar = Scalar(true); +/// scalar &= Scalar(false); +/// assert_eq!(scalar, Scalar(false)); +/// +/// let mut scalar = Scalar(false); +/// scalar &= Scalar(true); +/// assert_eq!(scalar, Scalar(false)); +/// +/// let mut scalar = Scalar(false); +/// scalar &= Scalar(false); +/// assert_eq!(scalar, Scalar(false)); +/// } +/// ``` +/// +/// In this example, the `BitAndAssign` trait is implemented for a +/// `BooleanVector` struct. +/// +/// ``` +/// use std::ops::BitAndAssign; +/// +/// #[derive(Debug, PartialEq)] +/// struct BooleanVector(Vec); +/// +/// impl BitAndAssign for BooleanVector { +/// // rhs is the "right-hand side" of the expression `a &= b` +/// fn bitand_assign(&mut self, rhs: Self) { +/// assert_eq!(self.0.len(), rhs.0.len()); +/// *self = BooleanVector(self.0 +/// .iter() +/// .zip(rhs.0.iter()) +/// .map(|(x, y)| *x && *y) +/// .collect()); +/// } +/// } +/// +/// fn main() { +/// let mut bv = BooleanVector(vec![true, true, false, false]); +/// bv &= BooleanVector(vec![true, false, true, false]); +/// let expected = BooleanVector(vec![true, false, false, false]); +/// assert_eq!(bv, expected); /// } /// ``` #[lang = "bitand_assign"] @@ -1376,28 +1877,44 @@ shr_assign_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize } /// /// # Examples /// -/// A trivial implementation of `Index`. When `Foo[Bar]` happens, it ends up -/// calling `index`, and therefore, `main` prints `Indexing!`. +/// This example implements `Index` on a read-only `NucleotideCount` container, +/// enabling individual counts to be retrieved with index syntax. 
/// /// ``` /// use std::ops::Index; /// -/// #[derive(Copy, Clone)] -/// struct Foo; -/// struct Bar; +/// enum Nucleotide { +/// A, +/// C, +/// G, +/// T, +/// } /// -/// impl Index for Foo { -/// type Output = Foo; +/// struct NucleotideCount { +/// a: usize, +/// c: usize, +/// g: usize, +/// t: usize, +/// } /// -/// fn index<'a>(&'a self, _index: Bar) -> &'a Foo { -/// println!("Indexing!"); -/// self +/// impl Index for NucleotideCount { +/// type Output = usize; +/// +/// fn index(&self, nucleotide: Nucleotide) -> &usize { +/// match nucleotide { +/// Nucleotide::A => &self.a, +/// Nucleotide::C => &self.c, +/// Nucleotide::G => &self.g, +/// Nucleotide::T => &self.t, +/// } /// } /// } /// -/// fn main() { -/// Foo[Bar]; -/// } +/// let nucleotide_count = NucleotideCount {a: 14, c: 9, g: 10, t: 12}; +/// assert_eq!(nucleotide_count[Nucleotide::A], 14); +/// assert_eq!(nucleotide_count[Nucleotide::C], 9); +/// assert_eq!(nucleotide_count[Nucleotide::G], 10); +/// assert_eq!(nucleotide_count[Nucleotide::T], 12); /// ``` #[lang = "index"] #[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"] @@ -1462,17 +1979,30 @@ pub trait IndexMut: Index { /// /// # Examples /// +/// The `..` syntax is a `RangeFull`: +/// +/// ``` +/// assert_eq!((..), std::ops::RangeFull); /// ``` -/// fn main() { -/// assert_eq!((..), std::ops::RangeFull); /// -/// let arr = [0, 1, 2, 3]; -/// assert_eq!(arr[ .. ], [0,1,2,3]); // RangeFull -/// assert_eq!(arr[ ..3], [0,1,2 ]); -/// assert_eq!(arr[1.. ], [ 1,2,3]); -/// assert_eq!(arr[1..3], [ 1,2 ]); +/// It does not have an `IntoIterator` implementation, so you can't use it in a +/// `for` loop directly. This won't compile: +/// +/// ```ignore +/// for i in .. { +/// // ... /// } /// ``` +/// +/// Used as a slicing index, `RangeFull` produces the full array as a slice. +/// +/// ``` +/// let arr = [0, 1, 2, 3]; +/// assert_eq!(arr[ .. ], [0,1,2,3]); // RangeFull +/// assert_eq!(arr[ ..3], [0,1,2 ]); +/// assert_eq!(arr[1.. ], [ 1,2,3]); +/// assert_eq!(arr[1..3], [ 1,2 ]); +/// ``` #[derive(Copy, Clone, PartialEq, Eq, Hash)] #[stable(feature = "rust1", since = "1.0.0")] pub struct RangeFull; @@ -1605,17 +2135,33 @@ impl> RangeFrom { /// /// It cannot serve as an iterator because it doesn't have a starting point. /// +/// # Examples +/// +/// The `..{integer}` syntax is a `RangeTo`: +/// +/// ``` +/// assert_eq!((..5), std::ops::RangeTo{ end: 5 }); /// ``` -/// fn main() { -/// assert_eq!((..5), std::ops::RangeTo{ end: 5 }); /// -/// let arr = [0, 1, 2, 3]; -/// assert_eq!(arr[ .. ], [0,1,2,3]); -/// assert_eq!(arr[ ..3], [0,1,2 ]); // RangeTo -/// assert_eq!(arr[1.. ], [ 1,2,3]); -/// assert_eq!(arr[1..3], [ 1,2 ]); +/// It does not have an `IntoIterator` implementation, so you can't use it in a +/// `for` loop directly. This won't compile: +/// +/// ```ignore +/// for i in ..5 { +/// // ... /// } /// ``` +/// +/// When used as a slicing index, `RangeTo` produces a slice of all array +/// elements before the index indicated by `end`. +/// +/// ``` +/// let arr = [0, 1, 2, 3]; +/// assert_eq!(arr[ .. ], [0,1,2,3]); +/// assert_eq!(arr[ ..3], [0,1,2 ]); // RangeTo +/// assert_eq!(arr[1.. 
], [ 1,2,3]); +/// assert_eq!(arr[1..3], [ 1,2 ]); +/// ``` #[derive(Copy, Clone, PartialEq, Eq, Hash)] #[stable(feature = "rust1", since = "1.0.0")] pub struct RangeTo { @@ -1743,16 +2289,31 @@ impl> RangeInclusive { /// /// # Examples /// +/// The `...{integer}` syntax is a `RangeToInclusive`: +/// /// ``` /// #![feature(inclusive_range,inclusive_range_syntax)] -/// fn main() { -/// assert_eq!((...5), std::ops::RangeToInclusive{ end: 5 }); +/// assert_eq!((...5), std::ops::RangeToInclusive{ end: 5 }); +/// ``` /// -/// let arr = [0, 1, 2, 3]; -/// assert_eq!(arr[ ...2], [0,1,2 ]); // RangeToInclusive -/// assert_eq!(arr[1...2], [ 1,2 ]); +/// It does not have an `IntoIterator` implementation, so you can't use it in a +/// `for` loop directly. This won't compile: +/// +/// ```ignore +/// for i in ...5 { +/// // ... /// } /// ``` +/// +/// When used as a slicing index, `RangeToInclusive` produces a slice of all +/// array elements up to and including the index indicated by `end`. +/// +/// ``` +/// #![feature(inclusive_range_syntax)] +/// let arr = [0, 1, 2, 3]; +/// assert_eq!(arr[ ...2], [0,1,2 ]); // RangeToInclusive +/// assert_eq!(arr[1...2], [ 1,2 ]); +/// ``` #[derive(Copy, Clone, PartialEq, Eq, Hash)] #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] pub struct RangeToInclusive { @@ -1901,6 +2462,35 @@ impl<'a, T: ?Sized> DerefMut for &'a mut T { } /// A version of the call operator that takes an immutable receiver. +/// +/// # Examples +/// +/// Closures automatically implement this trait, which allows them to be +/// invoked. Note, however, that `Fn` takes an immutable reference to any +/// captured variables. To take a mutable capture, implement [`FnMut`], and to +/// consume the capture, implement [`FnOnce`]. +/// +/// [`FnMut`]: trait.FnMut.html +/// [`FnOnce`]: trait.FnOnce.html +/// +/// ``` +/// let square = |x| x * x; +/// assert_eq!(square(5), 25); +/// ``` +/// +/// Closures can also be passed to higher-level functions through a `Fn` +/// parameter (or a `FnMut` or `FnOnce` parameter, which are supertraits of +/// `Fn`). +/// +/// ``` +/// fn call_with_one(func: F) -> usize +/// where F: Fn(usize) -> usize { +/// func(1) +/// } +/// +/// let double = |x| x * 2; +/// assert_eq!(call_with_one(double), 2); +/// ``` #[lang = "fn"] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_paren_sugar] @@ -1912,6 +2502,40 @@ pub trait Fn : FnMut { } /// A version of the call operator that takes a mutable receiver. +/// +/// # Examples +/// +/// Closures that mutably capture variables automatically implement this trait, +/// which allows them to be invoked. +/// +/// ``` +/// let mut x = 5; +/// { +/// let mut square_x = || x *= x; +/// square_x(); +/// } +/// assert_eq!(x, 25); +/// ``` +/// +/// Closures can also be passed to higher-level functions through a `FnMut` +/// parameter (or a `FnOnce` parameter, which is a supertrait of `FnMut`). +/// +/// ``` +/// fn do_twice(mut func: F) +/// where F: FnMut() +/// { +/// func(); +/// func(); +/// } +/// +/// let mut x: usize = 1; +/// { +/// let add_two_to_x = || x += 2; +/// do_twice(add_two_to_x); +/// } +/// +/// assert_eq!(x, 5); +/// ``` #[lang = "fn_mut"] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_paren_sugar] @@ -1923,6 +2547,41 @@ pub trait FnMut : FnOnce { } /// A version of the call operator that takes a by-value receiver. +/// +/// # Examples +/// +/// By-value closures automatically implement this trait, which allows them to +/// be invoked. 
+/// +/// ``` +/// let x = 5; +/// let square_x = move || x * x; +/// assert_eq!(square_x(), 25); +/// ``` +/// +/// By-value Closures can also be passed to higher-level functions through a +/// `FnOnce` parameter. +/// +/// ``` +/// fn consume_with_relish(func: F) +/// where F: FnOnce() -> String +/// { +/// // `func` consumes its captured variables, so it cannot be run more +/// // than once +/// println!("Consumed: {}", func()); +/// +/// println!("Delicious!"); +/// +/// // Attempting to invoke `func()` again will throw a `use of moved +/// // value` error for `func` +/// } +/// +/// let x = String::from("x"); +/// let consume_and_return_x = move || x; +/// consume_with_relish(consume_and_return_x); +/// +/// // `consume_and_return_x` can no longer be invoked at this point +/// ``` #[lang = "fn_once"] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_paren_sugar] @@ -1938,9 +2597,6 @@ pub trait FnOnce { } mod impls { - use marker::Sized; - use super::{Fn, FnMut, FnOnce}; - #[stable(feature = "rust1", since = "1.0.0")] impl<'a,A,F:?Sized> Fn for &'a F where F : Fn @@ -2145,3 +2801,74 @@ pub trait BoxPlace : Place { /// Creates a globally fresh place. fn make_place() -> Self; } + +/// A trait for types which have success and error states and are meant to work +/// with the question mark operator. +/// When the `?` operator is used with a value, whether the value is in the +/// success or error state is determined by calling `translate`. +/// +/// This trait is **very** experimental, it will probably be iterated on heavily +/// before it is stabilised. Implementors should expect change. Users of `?` +/// should not rely on any implementations of `Carrier` other than `Result`, +/// i.e., you should not expect `?` to continue to work with `Option`, etc. +#[unstable(feature = "question_mark_carrier", issue = "31436")] +pub trait Carrier { + /// The type of the value when computation succeeds. + type Success; + /// The type of the value when computation errors out. + type Error; + + /// Create a `Carrier` from a success value. + fn from_success(Self::Success) -> Self; + + /// Create a `Carrier` from an error value. + fn from_error(Self::Error) -> Self; + + /// Translate this `Carrier` to another implementation of `Carrier` with the + /// same associated types. + fn translate(self) -> T where T: Carrier; +} + +#[unstable(feature = "question_mark_carrier", issue = "31436")] +impl Carrier for Result { + type Success = U; + type Error = V; + + fn from_success(u: U) -> Result { + Ok(u) + } + + fn from_error(e: V) -> Result { + Err(e) + } + + fn translate(self) -> T + where T: Carrier + { + match self { + Ok(u) => T::from_success(u), + Err(e) => T::from_error(e), + } + } +} + +struct _DummyErrorType; + +impl Carrier for _DummyErrorType { + type Success = (); + type Error = (); + + fn from_success(_: ()) -> _DummyErrorType { + _DummyErrorType + } + + fn from_error(_: ()) -> _DummyErrorType { + _DummyErrorType + } + + fn translate(self) -> T + where T: Carrier + { + T::from_success(()) + } +} diff --git a/src/libcore/option.rs b/src/libcore/option.rs index fe508adb71..cb18feff73 100644 --- a/src/libcore/option.rs +++ b/src/libcore/option.rs @@ -10,9 +10,9 @@ //! Optional values. //! -//! Type `Option` represents an optional value: every `Option` -//! is either `Some` and contains a value, or `None`, and -//! does not. `Option` types are very common in Rust code, as +//! Type [`Option`] represents an optional value: every [`Option`] +//! 
is either [`Some`] and contains a value, or [`None`], and +//! does not. [`Option`] types are very common in Rust code, as //! they have a number of uses: //! //! * Initial values @@ -26,8 +26,8 @@ //! * Nullable pointers //! * Swapping things out of difficult situations //! -//! Options are commonly paired with pattern matching to query the presence -//! of a value and take action, always accounting for the `None` case. +//! [`Option`]s are commonly paired with pattern matching to query the presence +//! of a value and take action, always accounting for the [`None`] case. //! //! ``` //! fn divide(numerator: f64, denominator: f64) -> Option { @@ -57,13 +57,13 @@ //! //! Rust's pointer types must always point to a valid location; there are //! no "null" pointers. Instead, Rust has *optional* pointers, like -//! the optional owned box, `Option>`. +//! the optional owned box, [`Option`]`<`[`Box`]`>`. //! -//! The following example uses `Option` to create an optional box of -//! `i32`. Notice that in order to use the inner `i32` value first the +//! The following example uses [`Option`] to create an optional box of +//! [`i32`]. Notice that in order to use the inner [`i32`] value first the //! `check_optional` function needs to use pattern matching to -//! determine whether the box has a value (i.e. it is `Some(...)`) or -//! not (`None`). +//! determine whether the box has a value (i.e. it is [`Some(...)`][`Some`]) or +//! not ([`None`]). //! //! ``` //! let optional: Option> = None; @@ -80,14 +80,14 @@ //! } //! ``` //! -//! This usage of `Option` to create safe nullable pointers is so +//! This usage of [`Option`] to create safe nullable pointers is so //! common that Rust does special optimizations to make the -//! representation of `Option>` a single pointer. Optional pointers +//! representation of [`Option`]`<`[`Box`]`>` a single pointer. Optional pointers //! in Rust are stored as efficiently as any other pointer type. //! //! # Examples //! -//! Basic pattern matching on `Option`: +//! Basic pattern matching on [`Option`]: //! //! ``` //! let msg = Some("howdy"); @@ -101,7 +101,7 @@ //! let unwrapped_msg = msg.unwrap_or("default message"); //! ``` //! -//! Initialize a result to `None` before a loop: +//! Initialize a result to [`None`] before a loop: //! //! ``` //! enum Kingdom { Plant(u32, &'static str), Animal(u32, &'static str) } @@ -136,20 +136,17 @@ //! None => println!("there are no animals :("), //! } //! ``` +//! +//! [`Option`]: enum.Option.html +//! [`Some`]: enum.Option.html#variant.Some +//! [`None`]: enum.Option.html#variant.None +//! [`Box`]: ../../std/boxed/struct.Box.html +//! [`i32`]: ../../std/primitive.i32.html #![stable(feature = "rust1", since = "1.0.0")] -use self::Option::*; - -use clone::Clone; -use convert::From; -use default::Default; -use iter::ExactSizeIterator; -use iter::{Iterator, DoubleEndedIterator, FromIterator, IntoIterator}; +use iter::{FromIterator, FusedIterator}; use mem; -use ops::FnOnce; -use result::Result::{Ok, Err}; -use result::Result; // Note that this is not a lang item per se, but it has a hidden dependency on // `Iterator`, which is one. 
The compiler assumes that the `next` method of @@ -165,7 +162,7 @@ pub enum Option { None, /// Some value `T` #[stable(feature = "rust1", since = "1.0.0")] - Some(#[stable(feature = "rust1", since = "1.0.0")] T) + Some(#[stable(feature = "rust1", since = "1.0.0")] T), } ///////////////////////////////////////////////////////////////////////////// @@ -177,7 +174,7 @@ impl Option { // Querying the contained values ///////////////////////////////////////////////////////////////////////// - /// Returns `true` if the option is a `Some` value + /// Returns `true` if the option is a `Some` value. /// /// # Examples /// @@ -197,7 +194,7 @@ impl Option { } } - /// Returns `true` if the option is a `None` value + /// Returns `true` if the option is a `None` value. /// /// # Examples /// @@ -218,15 +215,17 @@ impl Option { // Adapter for working with references ///////////////////////////////////////////////////////////////////////// - /// Converts from `Option` to `Option<&T>` + /// Converts from `Option` to `Option<&T>`. /// /// # Examples /// /// Convert an `Option` into an `Option`, preserving the original. - /// The `map` method takes the `self` argument by value, consuming the original, + /// The [`map`] method takes the `self` argument by value, consuming the original, /// so this technique uses `as_ref` to first take an `Option` to a reference /// to the value inside the original. /// + /// [`map`]: enum.Option.html#method.map + /// /// ``` /// let num_as_str: Option = Some("10".to_string()); /// // First, cast `Option` to `Option<&String>` with `as_ref`, @@ -243,7 +242,7 @@ impl Option { } } - /// Converts from `Option` to `Option<&mut T>` + /// Converts from `Option` to `Option<&mut T>`. /// /// # Examples /// @@ -297,16 +296,14 @@ impl Option { /// Moves the value `v` out of the `Option` if it is `Some(v)`. /// - /// # Panics - /// - /// Panics if the self value equals `None`. - /// - /// # Safety note - /// /// In general, because this function may panic, its use is discouraged. /// Instead, prefer to use pattern matching and handle the `None` /// case explicitly. /// + /// # Panics + /// + /// Panics if the self value equals `None`. + /// /// # Examples /// /// ``` @@ -366,7 +363,7 @@ impl Option { // Transforming contained values ///////////////////////////////////////////////////////////////////////// - /// Maps an `Option` to `Option` by applying a function to a contained value + /// Maps an `Option` to `Option` by applying a function to a contained value. /// /// # Examples /// @@ -432,8 +429,12 @@ impl Option { } } - /// Transforms the `Option` into a `Result`, mapping `Some(v)` to - /// `Ok(v)` and `None` to `Err(err)`. + /// Transforms the `Option` into a [`Result`], mapping `Some(v)` to + /// [`Ok(v)`] and `None` to [`Err(err)`][Err]. + /// + /// [`Result`]: ../../std/result/enum.Result.html + /// [`Ok(v)`]: ../../std/result/enum.Result.html#variant.Ok + /// [Err]: ../../std/result/enum.Result.html#variant.Err /// /// # Examples /// @@ -453,8 +454,12 @@ impl Option { } } - /// Transforms the `Option` into a `Result`, mapping `Some(v)` to - /// `Ok(v)` and `None` to `Err(err())`. + /// Transforms the `Option` into a [`Result`], mapping `Some(v)` to + /// [`Ok(v)`] and `None` to [`Err(err())`][Err]. + /// + /// [`Result`]: ../../std/result/enum.Result.html + /// [`Ok(v)`]: ../../std/result/enum.Result.html#variant.Ok + /// [Err]: ../../std/result/enum.Result.html#variant.Err /// /// # Examples /// @@ -707,6 +712,7 @@ fn expect_failed(msg: &str) -> ! 
{ #[stable(feature = "rust1", since = "1.0.0")] impl Default for Option { + /// Returns None. #[inline] fn default() -> Option { None } } @@ -796,8 +802,11 @@ impl DoubleEndedIterator for Item { } impl ExactSizeIterator for Item {} +impl FusedIterator for Item {} -/// An iterator over a reference of the contained item in an Option. +/// An iterator over a reference of the contained item in an [`Option`]. +/// +/// [`Option`]: enum.Option.html #[stable(feature = "rust1", since = "1.0.0")] #[derive(Debug)] pub struct Iter<'a, A: 'a> { inner: Item<&'a A> } @@ -821,6 +830,9 @@ impl<'a, A> DoubleEndedIterator for Iter<'a, A> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, A> ExactSizeIterator for Iter<'a, A> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, A> FusedIterator for Iter<'a, A> {} + #[stable(feature = "rust1", since = "1.0.0")] impl<'a, A> Clone for Iter<'a, A> { fn clone(&self) -> Iter<'a, A> { @@ -828,7 +840,9 @@ impl<'a, A> Clone for Iter<'a, A> { } } -/// An iterator over a mutable reference of the contained item in an Option. +/// An iterator over a mutable reference of the contained item in an [`Option`]. +/// +/// [`Option`]: enum.Option.html #[stable(feature = "rust1", since = "1.0.0")] #[derive(Debug)] pub struct IterMut<'a, A: 'a> { inner: Item<&'a mut A> } @@ -852,7 +866,12 @@ impl<'a, A> DoubleEndedIterator for IterMut<'a, A> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, A> ExactSizeIterator for IterMut<'a, A> {} -/// An iterator over the item contained inside an Option. +#[unstable(feature = "fused", issue = "35602")] +impl<'a, A> FusedIterator for IterMut<'a, A> {} + +/// An iterator over the item contained inside an [`Option`]. +/// +/// [`Option`]: enum.Option.html #[derive(Clone, Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct IntoIter { inner: Item } @@ -876,6 +895,9 @@ impl DoubleEndedIterator for IntoIter { #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for IntoIter {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} + ///////////////////////////////////////////////////////////////////////////// // FromIterator ///////////////////////////////////////////////////////////////////////////// diff --git a/src/libcore/ptr.rs b/src/libcore/ptr.rs index 925cdfec90..69682652a6 100644 --- a/src/libcore/ptr.rs +++ b/src/libcore/ptr.rs @@ -16,17 +16,14 @@ #![stable(feature = "rust1", since = "1.0.0")] -use clone::Clone; use intrinsics; use ops::{CoerceUnsized, Deref}; use fmt; use hash; -use option::Option::{self, Some, None}; -use marker::{Copy, PhantomData, Send, Sized, Sync, Unsize}; +use marker::{PhantomData, Unsize}; use mem; use nonzero::NonZero; -use cmp::{PartialEq, Eq, Ord, PartialOrd}; use cmp::Ordering::{self, Less, Equal, Greater}; // FIXME #19649: intrinsic docs don't render, so these have no docs :( @@ -128,7 +125,9 @@ pub unsafe fn replace(dest: *mut T, mut src: T) -> T { /// let x = 12; /// let y = &x as *const i32; /// -/// unsafe { println!("{}", std::ptr::read(y)); } +/// unsafe { +/// assert_eq!(std::ptr::read(y), 12); +/// } /// ``` #[inline(always)] #[stable(feature = "rust1", since = "1.0.0")] @@ -138,21 +137,6 @@ pub unsafe fn read(src: *const T) -> T { tmp } -#[allow(missing_docs)] -#[inline(always)] -#[unstable(feature = "filling_drop", - reason = "may play a larger role in std::ptr future extensions", - issue = "5016")] -pub unsafe fn read_and_drop(dest: *mut T) -> T { - // Copy the data out from `dest`: - let tmp = read(&*dest); - - // Now 
mark `dest` as dropped: - write_bytes(dest, mem::POST_DROP_U8, 1); - - tmp -} - /// Overwrites a memory location with the given value without reading or /// dropping the old value. /// @@ -178,7 +162,7 @@ pub unsafe fn read_and_drop(dest: *mut T) -> T { /// /// unsafe { /// std::ptr::write(y, z); -/// println!("{}", std::ptr::read(y)); +/// assert_eq!(std::ptr::read(y), 12); /// } /// ``` #[inline] @@ -220,7 +204,9 @@ pub unsafe fn write(dst: *mut T, src: T) { /// let x = 12; /// let y = &x as *const i32; /// -/// unsafe { println!("{}", std::ptr::read_volatile(y)); } +/// unsafe { +/// assert_eq!(std::ptr::read_volatile(y), 12); +/// } /// ``` #[inline] #[stable(feature = "volatile", since = "1.9.0")] @@ -266,7 +252,7 @@ pub unsafe fn read_volatile(src: *const T) -> T { /// /// unsafe { /// std::ptr::write_volatile(y, z); -/// println!("{}", std::ptr::read_volatile(y)); +/// assert_eq!(std::ptr::read_volatile(y), 12); /// } /// ``` #[inline] @@ -493,6 +479,40 @@ impl PartialEq for *mut T { #[stable(feature = "rust1", since = "1.0.0")] impl Eq for *mut T {} +/// Compare raw pointers for equality. +/// +/// This is the same as using the `==` operator, but less generic: +/// the arguments have to be `*const T` raw pointers, +/// not anything that implements `PartialEq`. +/// +/// This can be used to compare `&T` references (which coerce to `*const T` implicitly) +/// by their address rather than comparing the values they point to +/// (which is what the `PartialEq for &T` implementation does). +/// +/// # Examples +/// +/// ``` +/// #![feature(ptr_eq)] +/// use std::ptr; +/// +/// let five = 5; +/// let other_five = 5; +/// let five_ref = &five; +/// let same_five_ref = &five; +/// let other_five_ref = &other_five; +/// +/// assert!(five_ref == same_five_ref); +/// assert!(five_ref == other_five_ref); +/// +/// assert!(ptr::eq(five_ref, same_five_ref)); +/// assert!(!ptr::eq(five_ref, other_five_ref)); +/// ``` +#[unstable(feature = "ptr_eq", reason = "newly added", issue = "36497")] +#[inline] +pub fn eq(a: *const T, b: *const T) -> bool { + a == b +} + #[stable(feature = "rust1", since = "1.0.0")] impl Clone for *const T { #[inline] diff --git a/src/libcore/result.rs b/src/libcore/result.rs index c7ca70fc16..9684525929 100644 --- a/src/libcore/result.rs +++ b/src/libcore/result.rs @@ -10,9 +10,9 @@ //! Error handling with the `Result` type. //! -//! `Result` is the type used for returning and propagating -//! errors. It is an enum with the variants, `Ok(T)`, representing -//! success and containing a value, and `Err(E)`, representing error +//! [`Result`][`Result`] is the type used for returning and propagating +//! errors. It is an enum with the variants, [`Ok(T)`], representing +//! success and containing a value, and [`Err(E)`], representing error //! and containing an error value. //! //! ``` @@ -23,11 +23,11 @@ //! } //! ``` //! -//! Functions return `Result` whenever errors are expected and -//! recoverable. In the `std` crate `Result` is most prominently used +//! Functions return [`Result`] whenever errors are expected and +//! recoverable. In the `std` crate, [`Result`] is most prominently used //! for [I/O](../../std/io/index.html). //! -//! A simple function returning `Result` might be +//! A simple function returning [`Result`] might be //! defined and used like so: //! //! ``` @@ -50,8 +50,8 @@ //! } //! ``` //! -//! Pattern matching on `Result`s is clear and straightforward for -//! simple cases, but `Result` comes with some convenience methods +//! 
Pattern matching on [`Result`]s is clear and straightforward for +//! simple cases, but [`Result`] comes with some convenience methods //! that make working with it more succinct. //! //! ``` @@ -80,14 +80,14 @@ //! //! A common problem with using return values to indicate errors is //! that it is easy to ignore the return value, thus failing to handle -//! the error. Result is annotated with the #[must_use] attribute, +//! the error. [`Result`] is annotated with the `#[must_use]` attribute, //! which will cause the compiler to issue a warning when a Result -//! value is ignored. This makes `Result` especially useful with +//! value is ignored. This makes [`Result`] especially useful with //! functions that may encounter errors but don't otherwise return a //! useful value. //! -//! Consider the `write_all` method defined for I/O types -//! by the [`Write`](../../std/io/trait.Write.html) trait: +//! Consider the [`write_all`] method defined for I/O types +//! by the [`Write`] trait: //! //! ``` //! use std::io; @@ -97,8 +97,8 @@ //! } //! ``` //! -//! *Note: The actual definition of `Write` uses `io::Result`, which -//! is just a synonym for `Result`.* +//! *Note: The actual definition of [`Write`] uses [`io::Result`], which +//! is just a synonym for [`Result`]``.* //! //! This method doesn't produce a value, but the write may //! fail. It's crucial to handle the error case, and *not* write @@ -119,7 +119,7 @@ //! warning (by default, controlled by the `unused_must_use` lint). //! //! You might instead, if you don't want to handle the error, simply -//! assert success with `expect`. This will panic if the +//! assert success with [`expect`]. This will panic if the //! write fails, providing a marginally useful message indicating why: //! //! ```{.no_run} @@ -139,7 +139,7 @@ //! assert!(file.write_all(b"important message").is_ok()); //! ``` //! -//! Or propagate the error up the call stack with `try!`: +//! Or propagate the error up the call stack with [`try!`]: //! //! ``` //! # use std::fs::File; @@ -156,7 +156,7 @@ //! # The `try!` macro //! //! When writing code that calls many functions that return the -//! `Result` type, the error handling can be tedious. The `try!` +//! [`Result`] type, the error handling can be tedious. The [`try!`] //! macro hides some of the boilerplate of propagating errors up the //! call stack. //! @@ -219,9 +219,9 @@ //! //! *It's much nicer!* //! -//! Wrapping an expression in `try!` will result in the unwrapped -//! success (`Ok`) value, unless the result is `Err`, in which case -//! `Err` is returned early from the enclosing function. Its simple definition +//! Wrapping an expression in [`try!`] will result in the unwrapped +//! success ([`Ok`]) value, unless the result is [`Err`], in which case +//! [`Err`] is returned early from the enclosing function. Its simple definition //! makes it clear: //! //! ``` @@ -230,19 +230,26 @@ //! } //! ``` //! -//! `try!` is imported by the prelude and is available everywhere, but it can only -//! be used in functions that return `Result` because of the early return of -//! `Err` that it provides. +//! [`try!`] is imported by the prelude and is available everywhere, but it can only +//! be used in functions that return [`Result`] because of the early return of +//! [`Err`] that it provides. +//! +//! [`expect`]: enum.Result.html#method.expect +//! [`Write`]: ../../std/io/trait.Write.html +//! [`write_all`]: ../../std/io/trait.Write.html#method.write_all +//! [`io::Result`]: ../../std/io/type.Result.html +//! 
[`try!`]: ../../std/macro.try.html +//! [`Result`]: enum.Result.html +//! [`Ok(T)`]: enum.Result.html#variant.Ok +//! [`Err(E)`]: enum.Result.html#variant.Err +//! [`io::Error`]: ../../std/io/struct.Error.html +//! [`Ok`]: enum.Result.html#variant.Ok +//! [`Err`]: enum.Result.html#variant.Err #![stable(feature = "rust1", since = "1.0.0")] -use self::Result::{Ok, Err}; - -use clone::Clone; use fmt; -use iter::{Iterator, DoubleEndedIterator, FromIterator, ExactSizeIterator, IntoIterator}; -use ops::FnOnce; -use option::Option::{self, None, Some}; +use iter::{FromIterator, FusedIterator}; /// `Result` is a type that represents either success (`Ok`) or failure (`Err`). /// @@ -269,7 +276,7 @@ impl Result { // Querying the contained values ///////////////////////////////////////////////////////////////////////// - /// Returns true if the result is `Ok` + /// Returns true if the result is `Ok`. /// /// # Examples /// @@ -291,7 +298,7 @@ impl Result { } } - /// Returns true if the result is `Err` + /// Returns true if the result is `Err`. /// /// # Examples /// @@ -314,11 +321,13 @@ impl Result { // Adapter for each variant ///////////////////////////////////////////////////////////////////////// - /// Converts from `Result` to `Option` + /// Converts from `Result` to [`Option`]. /// - /// Converts `self` into an `Option`, consuming `self`, + /// Converts `self` into an [`Option`], consuming `self`, /// and discarding the error, if any. /// + /// [`Option`]: ../../std/option/enum.Option.html + /// /// # Examples /// /// Basic usage: @@ -339,11 +348,13 @@ impl Result { } } - /// Converts from `Result` to `Option` + /// Converts from `Result` to [`Option`]. /// - /// Converts `self` into an `Option`, consuming `self`, + /// Converts `self` into an [`Option`], consuming `self`, /// and discarding the success value, if any. /// + /// [`Option`]: ../../std/option/enum.Option.html + /// /// # Examples /// /// Basic usage: @@ -368,7 +379,7 @@ impl Result { // Adapter for working with references ///////////////////////////////////////////////////////////////////////// - /// Converts from `Result` to `Result<&T, &E>` + /// Converts from `Result` to `Result<&T, &E>`. /// /// Produces a new `Result`, containing a reference /// into the original, leaving the original in place. @@ -393,7 +404,7 @@ impl Result { } } - /// Converts from `Result` to `Result<&mut T, &mut E>` + /// Converts from `Result` to `Result<&mut T, &mut E>`. /// /// # Examples /// @@ -568,7 +579,7 @@ impl Result { /// Calls `op` if the result is `Ok`, otherwise returns the `Err` value of `self`. /// - /// This function can be used for control flow based on result values. + /// This function can be used for control flow based on `Result` values. /// /// # Examples /// @@ -651,7 +662,7 @@ impl Result { } /// Unwraps a result, yielding the content of an `Ok`. - /// Else it returns `optb`. + /// Else, it returns `optb`. /// /// # Examples /// @@ -842,7 +853,10 @@ impl<'a, T, E> IntoIterator for &'a mut Result { // The Result Iterators ///////////////////////////////////////////////////////////////////////////// -/// An iterator over a reference to the `Ok` variant of a `Result`. +/// An iterator over a reference to the [`Ok`] variant of a [`Result`]. 
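A minimal sketch of how the `Ok`-variant iterators described here behave, using only the stable `Result` API (the variable names are made up for illustration):

```rust
fn main() {
    let good: Result<u32, &str> = Ok(7);
    let bad: Result<u32, &str> = Err("nope");

    // `iter` borrows the result and yields a reference to the `Ok` value,
    // or nothing at all for an `Err`.
    let collected: Vec<&u32> = good.iter().collect();
    assert_eq!(collected, vec![&7]);
    assert_eq!(bad.iter().next(), None);

    // `into_iter` consumes the result and yields the contained value itself.
    let owned: Vec<u32> = good.into_iter().collect();
    assert_eq!(owned, vec![7]);
}
```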
+/// +/// [`Ok`]: enum.Result.html#variant.Ok +/// [`Result`]: enum.Result.html #[derive(Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct Iter<'a, T: 'a> { inner: Option<&'a T> } @@ -869,12 +883,18 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Iter<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Iter<'a, T> {} + #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Iter<'a, T> { fn clone(&self) -> Iter<'a, T> { Iter { inner: self.inner } } } -/// An iterator over a mutable reference to the `Ok` variant of a `Result`. +/// An iterator over a mutable reference to the [`Ok`] variant of a [`Result`]. +/// +/// [`Ok`]: enum.Result.html#variant.Ok +/// [`Result`]: enum.Result.html #[derive(Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct IterMut<'a, T: 'a> { inner: Option<&'a mut T> } @@ -901,7 +921,17 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for IterMut<'a, T> {} -/// An iterator over the value in a `Ok` variant of a `Result`. +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for IterMut<'a, T> {} + +/// An iterator over the value in a [`Ok`] variant of a [`Result`]. This struct is +/// created by the [`into_iter`] method on [`Result`][`Result`] (provided by +/// the [`IntoIterator`] trait). +/// +/// [`Ok`]: enum.Result.html#variant.Ok +/// [`Result`]: enum.Result.html +/// [`into_iter`]: ../iter/trait.IntoIterator.html#tymethod.into_iter +/// [`IntoIterator`]: ../iter/trait.IntoIterator.html #[derive(Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct IntoIter { inner: Option } @@ -928,6 +958,9 @@ impl DoubleEndedIterator for IntoIter { #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for IntoIter {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} + ///////////////////////////////////////////////////////////////////////////// // FromIterator ///////////////////////////////////////////////////////////////////////////// diff --git a/src/libcore/slice.rs b/src/libcore/slice.rs index 3141c289e9..d1df56905d 100644 --- a/src/libcore/slice.rs +++ b/src/libcore/slice.rs @@ -33,23 +33,15 @@ // * The `raw` and `bytes` submodules. // * Boilerplate trait implementations. 
-use clone::Clone; -use cmp::{Ordering, PartialEq, PartialOrd, Eq, Ord}; -use cmp::Ordering::{Less, Equal, Greater}; +use cmp::Ordering::{self, Less, Equal, Greater}; use cmp; -use default::Default; use fmt; use intrinsics::assume; use iter::*; -use ops::{FnMut, self}; -use ops::RangeFull; -use option::Option; -use option::Option::{None, Some}; -use result::Result; -use result::Result::{Ok, Err}; +use ops::{self, RangeFull}; use ptr; use mem; -use marker::{Copy, Send, Sync, self}; +use marker; use iter_private::TrustedRandomAccess; #[repr(C)] @@ -528,8 +520,8 @@ impl ops::Index for [T] { type Output = T; fn index(&self, index: usize) -> &T { - assert!(index < self.len()); - unsafe { self.get_unchecked(index) } + // NB built-in indexing + &(*self)[index] } } @@ -538,8 +530,8 @@ impl ops::Index for [T] { impl ops::IndexMut for [T] { #[inline] fn index_mut(&mut self, index: usize) -> &mut T { - assert!(index < self.len()); - unsafe { self.get_unchecked_mut(index) } + // NB built-in indexing + &mut (*self)[index] } } @@ -763,11 +755,13 @@ impl ops::IndexMut> for [T] { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Default for &'a [T] { + /// Creates an empty slice. fn default() -> &'a [T] { &[] } } #[stable(feature = "mut_slice_default", since = "1.5.0")] impl<'a, T> Default for &'a mut [T] { + /// Creates a mutable empty slice. fn default() -> &'a mut [T] { &mut [] } } @@ -991,11 +985,21 @@ iterator!{struct Iter -> *const T, &'a T} #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Iter<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Iter<'a, T> {} + #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Iter<'a, T> { fn clone(&self) -> Iter<'a, T> { Iter { ptr: self.ptr, end: self.end, _marker: self._marker } } } +#[stable(feature = "slice_iter_as_ref", since = "1.12.0")] +impl<'a, T> AsRef<[T]> for Iter<'a, T> { + fn as_ref(&self) -> &[T] { + self.as_slice() + } +} + /// Mutable slice iterator. /// /// This struct is created by the [`iter_mut`] method on [slices]. @@ -1102,6 +1106,9 @@ iterator!{struct IterMut -> *mut T, &'a mut T} #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for IterMut<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for IterMut<'a, T> {} + /// An internal abstraction over the splitting iterators, so that /// splitn, splitn_mut etc can be implemented once. #[doc(hidden)] @@ -1194,6 +1201,9 @@ impl<'a, T, P> SplitIter for Split<'a, T, P> where P: FnMut(&T) -> bool { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T, P> FusedIterator for Split<'a, T, P> where P: FnMut(&T) -> bool {} + /// An iterator over the subslices of the vector which are separated /// by elements that match `pred`. #[stable(feature = "rust1", since = "1.0.0")] @@ -1284,6 +1294,9 @@ impl<'a, T, P> DoubleEndedIterator for SplitMut<'a, T, P> where } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T, P> FusedIterator for SplitMut<'a, T, P> where P: FnMut(&T) -> bool {} + /// An private iterator over subslices separated by elements that /// match a predicate function, splitting at most a fixed number of /// times. @@ -1400,6 +1413,10 @@ macro_rules! 
forward_iterator { self.inner.size_hint() } } + + #[unstable(feature = "fused", issue = "35602")] + impl<'a, $elem, P> FusedIterator for $name<'a, $elem, P> + where P: FnMut(&T) -> bool {} } } @@ -1498,6 +1515,9 @@ impl<'a, T> DoubleEndedIterator for Windows<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Windows<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Windows<'a, T> {} + /// An iterator over a slice in (non-overlapping) chunks (`size` elements at a /// time). /// @@ -1601,6 +1621,9 @@ impl<'a, T> DoubleEndedIterator for Chunks<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Chunks<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Chunks<'a, T> {} + /// An iterator over a slice in (non-overlapping) mutable chunks (`size` /// elements at a time). When the slice len is not evenly divided by the chunk /// size, the last slice of the iteration will be the remainder. @@ -1696,6 +1719,9 @@ impl<'a, T> DoubleEndedIterator for ChunksMut<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for ChunksMut<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for ChunksMut<'a, T> {} + // // Free functions // @@ -1797,7 +1823,8 @@ impl PartialOrd for [T] { // intermediate trait for specialization of slice's PartialEq trait SlicePartialEq { fn equal(&self, other: &[B]) -> bool; - fn not_equal(&self, other: &[B]) -> bool; + + fn not_equal(&self, other: &[B]) -> bool { !self.equal(other) } } // Generic slice equality @@ -1817,20 +1844,6 @@ impl SlicePartialEq for [A] true } - - default fn not_equal(&self, other: &[B]) -> bool { - if self.len() != other.len() { - return true; - } - - for i in 0..self.len() { - if self[i].ne(&other[i]) { - return true; - } - } - - false - } } // Use memcmp for bytewise equality when the types allow @@ -1850,10 +1863,6 @@ impl SlicePartialEq for [A] other.as_ptr() as *const u8, size) == 0 } } - - fn not_equal(&self, other: &[A]) -> bool { - !self.equal(other) - } } #[doc(hidden)] diff --git a/src/libcore/str/mod.rs b/src/libcore/str/mod.rs index fdcadd43a0..d63d2d64fe 100644 --- a/src/libcore/str/mod.rs +++ b/src/libcore/str/mod.rs @@ -18,18 +18,10 @@ use self::pattern::Pattern; use self::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher}; use char; -use clone::Clone; -use convert::AsRef; -use default::Default; use fmt; -use iter::ExactSizeIterator; -use iter::{Map, Cloned, Iterator, DoubleEndedIterator}; -use marker::Sized; +use iter::{Map, Cloned, FusedIterator}; use mem; -use ops::{Fn, FnMut, FnOnce}; -use option::Option::{self, None, Some}; -use result::Result::{self, Ok, Err}; -use slice::{self, SliceExt}; +use slice; pub mod pattern; @@ -109,7 +101,7 @@ impl FromStr for bool { } /// An error returned when parsing a `bool` from a string fails. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] #[stable(feature = "rust1", since = "1.0.0")] pub struct ParseBoolError { _priv: () } @@ -388,8 +380,9 @@ pub fn next_code_point<'a, I: Iterator>(bytes: &mut I) -> Option< /// Reads the last code point out of a byte iterator (assuming a /// UTF-8-like encoding). 
#[inline] -fn next_code_point_reverse<'a, - I: DoubleEndedIterator>(bytes: &mut I) -> Option { +fn next_code_point_reverse<'a, I>(bytes: &mut I) -> Option + where I: DoubleEndedIterator, +{ // Decode UTF-8 let w = match bytes.next_back() { None => return None, @@ -454,6 +447,9 @@ impl<'a> DoubleEndedIterator for Chars<'a> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a> FusedIterator for Chars<'a> {} + impl<'a> Chars<'a> { /// View the underlying data as a subslice of the original data. /// @@ -525,6 +521,9 @@ impl<'a> DoubleEndedIterator for CharIndices<'a> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a> FusedIterator for CharIndices<'a> {} + impl<'a> CharIndices<'a> { /// View the underlying data as a subslice of the original data. /// @@ -593,6 +592,9 @@ impl<'a> ExactSizeIterator for Bytes<'a> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a> FusedIterator for Bytes<'a> {} + /// This macro generates a Clone impl for string pattern API /// wrapper types of the form X<'a, P> macro_rules! derive_pattern_clone { @@ -739,6 +741,13 @@ macro_rules! generate_pattern_iterators { } } + #[unstable(feature = "fused", issue = "35602")] + impl<'a, P: Pattern<'a>> FusedIterator for $forward_iterator<'a, P> {} + + #[unstable(feature = "fused", issue = "35602")] + impl<'a, P: Pattern<'a>> FusedIterator for $reverse_iterator<'a, P> + where P::Searcher: ReverseSearcher<'a> {} + generate_pattern_iterators!($($t)* with $(#[$common_stability_attribute])*, $forward_iterator, $reverse_iterator, $iterty); @@ -1088,6 +1097,9 @@ impl<'a> DoubleEndedIterator for Lines<'a> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a> FusedIterator for Lines<'a> {} + /// Created with the method [`lines_any()`]. /// /// [`lines_any()`]: ../../std/primitive.str.html#method.lines_any @@ -1151,6 +1163,10 @@ impl<'a> DoubleEndedIterator for LinesAny<'a> { } } +#[unstable(feature = "fused", issue = "35602")] +#[allow(deprecated)] +impl<'a> FusedIterator for LinesAny<'a> {} + /* Section: Comparing strings */ @@ -1314,11 +1330,9 @@ Section: Trait implementations */ mod traits { - use cmp::{Ord, Ordering, PartialEq, PartialOrd, Eq}; - use option::Option; - use option::Option::Some; + use cmp::Ordering; use ops; - use str::{StrExt, eq_slice}; + use str::eq_slice; #[stable(feature = "rust1", since = "1.0.0")] impl Ord for str { @@ -1973,5 +1987,6 @@ impl AsRef<[u8]> for str { #[stable(feature = "rust1", since = "1.0.0")] impl<'a> Default for &'a str { + /// Creates an empty str fn default() -> &'a str { "" } } diff --git a/src/libcore/str/pattern.rs b/src/libcore/str/pattern.rs index 53804c611e..7dced2ba75 100644 --- a/src/libcore/str/pattern.rs +++ b/src/libcore/str/pattern.rs @@ -17,8 +17,6 @@ reason = "API not fully fleshed out and ready to be stabilized", issue = "27721")] -use prelude::v1::*; - use cmp; use fmt; use usize; diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs index 5701a89d8b..f5f37be52d 100644 --- a/src/libcore/sync/atomic.rs +++ b/src/libcore/sync/atomic.rs @@ -79,17 +79,13 @@ use self::Ordering::*; -use marker::{Send, Sync}; - use intrinsics; use cell::UnsafeCell; - -use result::Result::{self, Ok, Err}; - -use default::Default; use fmt; /// A boolean type which can be safely shared between threads. +/// +/// This type has the same in-memory representation as a `bool`. 
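A minimal sketch of what "safely shared between threads" means in practice for `AtomicBool`, assuming nothing beyond the long-stable `Arc`, `thread`, and `Ordering` APIs (none of which this patch changes):

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

fn main() {
    let flag = Arc::new(AtomicBool::new(false));

    let worker = {
        let flag = flag.clone();
        thread::spawn(move || {
            // Publish a value; no mutex is needed because the store is atomic.
            flag.store(true, Ordering::SeqCst);
        })
    };

    worker.join().unwrap();
    assert_eq!(flag.load(Ordering::SeqCst), true);
}
```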
#[cfg(target_has_atomic = "8")] #[stable(feature = "rust1", since = "1.0.0")] pub struct AtomicBool { @@ -99,6 +95,7 @@ pub struct AtomicBool { #[cfg(target_has_atomic = "8")] #[stable(feature = "rust1", since = "1.0.0")] impl Default for AtomicBool { + /// Creates an `AtomicBool` initialised as false. fn default() -> Self { Self::new(false) } @@ -110,6 +107,8 @@ impl Default for AtomicBool { unsafe impl Sync for AtomicBool {} /// A raw pointer type which can be safely shared between threads. +/// +/// This type has the same in-memory representation as a `*mut T`. #[cfg(target_has_atomic = "ptr")] #[stable(feature = "rust1", since = "1.0.0")] pub struct AtomicPtr { @@ -119,6 +118,7 @@ pub struct AtomicPtr { #[cfg(target_has_atomic = "ptr")] #[stable(feature = "rust1", since = "1.0.0")] impl Default for AtomicPtr { + /// Creates a null `AtomicPtr`. fn default() -> AtomicPtr { AtomicPtr::new(::ptr::null_mut()) } @@ -191,6 +191,48 @@ impl AtomicBool { AtomicBool { v: UnsafeCell::new(v as u8) } } + /// Returns a mutable reference to the underlying `bool`. + /// + /// This is safe because the mutable reference guarantees that no other threads are + /// concurrently accessing the atomic data. + /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_access)] + /// use std::sync::atomic::{AtomicBool, Ordering}; + /// + /// let mut some_bool = AtomicBool::new(true); + /// assert_eq!(*some_bool.get_mut(), true); + /// *some_bool.get_mut() = false; + /// assert_eq!(some_bool.load(Ordering::SeqCst), false); + /// ``` + #[inline] + #[unstable(feature = "atomic_access", issue = "35603")] + pub fn get_mut(&mut self) -> &mut bool { + unsafe { &mut *(self.v.get() as *mut bool) } + } + + /// Consumes the atomic and returns the contained value. + /// + /// This is safe because passing `self` by value guarantees that no other threads are + /// concurrently accessing the atomic data. + /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_access)] + /// use std::sync::atomic::AtomicBool; + /// + /// let some_bool = AtomicBool::new(true); + /// assert_eq!(some_bool.into_inner(), true); + /// ``` + #[inline] + #[unstable(feature = "atomic_access", issue = "35603")] + pub fn into_inner(self) -> bool { + unsafe { self.v.into_inner() != 0 } + } + /// Loads a value from the bool. /// /// `load` takes an `Ordering` argument which describes the memory ordering of this operation. @@ -528,6 +570,47 @@ impl AtomicPtr { AtomicPtr { p: UnsafeCell::new(p) } } + /// Returns a mutable reference to the underlying pointer. + /// + /// This is safe because the mutable reference guarantees that no other threads are + /// concurrently accessing the atomic data. + /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_access)] + /// use std::sync::atomic::{AtomicPtr, Ordering}; + /// + /// let mut atomic_ptr = AtomicPtr::new(&mut 10); + /// *atomic_ptr.get_mut() = &mut 5; + /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5); + /// ``` + #[inline] + #[unstable(feature = "atomic_access", issue = "35603")] + pub fn get_mut(&mut self) -> &mut *mut T { + unsafe { &mut *self.p.get() } + } + + /// Consumes the atomic and returns the contained value. + /// + /// This is safe because passing `self` by value guarantees that no other threads are + /// concurrently accessing the atomic data. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_access)] + /// use std::sync::atomic::AtomicPtr; + /// + /// let atomic_ptr = AtomicPtr::new(&mut 5); + /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5); + /// ``` + #[inline] + #[unstable(feature = "atomic_access", issue = "35603")] + pub fn into_inner(self) -> *mut T { + unsafe { self.p.into_inner() } + } + /// Loads a value from the pointer. /// /// `load` takes an `Ordering` argument which describes the memory ordering of this operation. @@ -730,8 +813,11 @@ macro_rules! atomic_int { ($stable:meta, $stable_cxchg:meta, $stable_debug:meta, + $stable_access:meta, $int_type:ident $atomic_type:ident $atomic_init:ident) => { /// An integer type which can be safely shared between threads. + /// + /// This type has the same in-memory representation as the underlying integer type. #[$stable] pub struct $atomic_type { v: UnsafeCell<$int_type>, @@ -777,6 +863,48 @@ macro_rules! atomic_int { $atomic_type {v: UnsafeCell::new(v)} } + /// Returns a mutable reference to the underlying integer. + /// + /// This is safe because the mutable reference guarantees that no other threads are + /// concurrently accessing the atomic data. + /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_access)] + /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// + /// let mut some_isize = AtomicIsize::new(10); + /// assert_eq!(*some_isize.get_mut(), 10); + /// *some_isize.get_mut() = 5; + /// assert_eq!(some_isize.load(Ordering::SeqCst), 5); + /// ``` + #[inline] + #[$stable_access] + pub fn get_mut(&mut self) -> &mut $int_type { + unsafe { &mut *self.v.get() } + } + + /// Consumes the atomic and returns the contained value. + /// + /// This is safe because passing `self` by value guarantees that no other threads are + /// concurrently accessing the atomic data. + /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_access)] + /// use std::sync::atomic::AtomicIsize; + /// + /// let some_isize = AtomicIsize::new(5); + /// assert_eq!(some_isize.into_inner(), 5); + /// ``` + #[inline] + #[$stable_access] + pub fn into_inner(self) -> $int_type { + unsafe { self.v.into_inner() } + } + /// Loads a value from the atomic integer. /// /// `load` takes an `Ordering` argument which describes the memory ordering of this @@ -1057,6 +1185,7 @@ atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), i8 AtomicI8 ATOMIC_I8_INIT } #[cfg(target_has_atomic = "8")] @@ -1064,6 +1193,7 @@ atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), u8 AtomicU8 ATOMIC_U8_INIT } #[cfg(target_has_atomic = "16")] @@ -1071,6 +1201,7 @@ atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), i16 AtomicI16 ATOMIC_I16_INIT } #[cfg(target_has_atomic = "16")] @@ -1078,6 +1209,7 @@ atomic_int! 
{ unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), u16 AtomicU16 ATOMIC_U16_INIT } #[cfg(target_has_atomic = "32")] @@ -1085,6 +1217,7 @@ atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), i32 AtomicI32 ATOMIC_I32_INIT } #[cfg(target_has_atomic = "32")] @@ -1092,6 +1225,7 @@ atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), u32 AtomicU32 ATOMIC_U32_INIT } #[cfg(target_has_atomic = "64")] @@ -1099,6 +1233,7 @@ atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), i64 AtomicI64 ATOMIC_I64_INIT } #[cfg(target_has_atomic = "64")] @@ -1106,6 +1241,7 @@ atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), u64 AtomicU64 ATOMIC_U64_INIT } #[cfg(target_has_atomic = "ptr")] @@ -1113,6 +1249,7 @@ atomic_int!{ stable(feature = "rust1", since = "1.0.0"), stable(feature = "extended_compare_and_swap", since = "1.10.0"), stable(feature = "atomic_debug", since = "1.3.0"), + unstable(feature = "atomic_access", issue = "35603"), isize AtomicIsize ATOMIC_ISIZE_INIT } #[cfg(target_has_atomic = "ptr")] @@ -1120,6 +1257,7 @@ atomic_int!{ stable(feature = "rust1", since = "1.0.0"), stable(feature = "extended_compare_and_swap", since = "1.10.0"), stable(feature = "atomic_debug", since = "1.3.0"), + unstable(feature = "atomic_access", issue = "35603"), usize AtomicUsize ATOMIC_USIZE_INIT } diff --git a/src/libcore/tuple.rs b/src/libcore/tuple.rs index abaabfd129..c3608b60a3 100644 --- a/src/libcore/tuple.rs +++ b/src/libcore/tuple.rs @@ -10,12 +10,8 @@ // See src/libstd/primitive_docs.rs for documentation. -use clone::Clone; use cmp::*; use cmp::Ordering::*; -use default::Default; -use option::Option; -use option::Option::Some; // FIXME(#19630) Remove this work-around macro_rules! e { diff --git a/src/libcoretest/char.rs b/src/libcoretest/char.rs index 4632419336..199437a431 100644 --- a/src/libcoretest/char.rs +++ b/src/libcoretest/char.rs @@ -9,6 +9,24 @@ // except according to those terms. 
use std::char; +use std::convert::TryFrom; + +#[test] +fn test_convert() { + assert_eq!(u32::from('a'), 0x61); + assert_eq!(char::from(b'\0'), '\0'); + assert_eq!(char::from(b'a'), 'a'); + assert_eq!(char::from(b'\xFF'), '\u{FF}'); + assert_eq!(char::try_from(0_u32), Ok('\0')); + assert_eq!(char::try_from(0x61_u32), Ok('a')); + assert_eq!(char::try_from(0xD7FF_u32), Ok('\u{D7FF}')); + assert!(char::try_from(0xD800_u32).is_err()); + assert!(char::try_from(0xDFFF_u32).is_err()); + assert_eq!(char::try_from(0xE000_u32), Ok('\u{E000}')); + assert_eq!(char::try_from(0x10FFFF_u32), Ok('\u{10FFFF}')); + assert!(char::try_from(0x110000_u32).is_err()); + assert!(char::try_from(0xFFFF_FFFF_u32).is_err()); +} #[test] fn test_is_lowercase() { @@ -358,29 +376,50 @@ fn eu_iterator_specializations() { #[test] fn test_decode_utf8() { - use core::char::*; - use core::iter::FromIterator; - - for &(str, bs) in [("", &[] as &[u8]), - ("A", &[0x41u8] as &[u8]), - ("�", &[0xC1u8, 0x81u8] as &[u8]), - ("♥", &[0xE2u8, 0x99u8, 0xA5u8]), - ("♥A", &[0xE2u8, 0x99u8, 0xA5u8, 0x41u8] as &[u8]), - ("�", &[0xE2u8, 0x99u8] as &[u8]), - ("�A", &[0xE2u8, 0x99u8, 0x41u8] as &[u8]), - ("�", &[0xC0u8] as &[u8]), - ("�A", &[0xC0u8, 0x41u8] as &[u8]), - ("�", &[0x80u8] as &[u8]), - ("�A", &[0x80u8, 0x41u8] as &[u8]), - ("�", &[0xFEu8] as &[u8]), - ("�A", &[0xFEu8, 0x41u8] as &[u8]), - ("�", &[0xFFu8] as &[u8]), - ("�A", &[0xFFu8, 0x41u8] as &[u8])].into_iter() { - assert!(Iterator::eq(str.chars(), - decode_utf8(bs.into_iter().map(|&b|b)) - .map(|r_b| r_b.unwrap_or('\u{FFFD}'))), - "chars = {}, bytes = {:?}, decoded = {:?}", str, bs, - Vec::from_iter(decode_utf8(bs.into_iter().map(|&b|b)) - .map(|r_b| r_b.unwrap_or('\u{FFFD}')))); + macro_rules! assert_decode_utf8 { + ($input_bytes: expr, $expected_str: expr) => { + let input_bytes: &[u8] = &$input_bytes; + let s = char::decode_utf8(input_bytes.iter().cloned()) + .map(|r_b| r_b.unwrap_or('\u{FFFD}')) + .collect::(); + assert_eq!(s, $expected_str, + "input bytes: {:?}, expected str: {:?}, result: {:?}", + input_bytes, $expected_str, s); + assert_eq!(String::from_utf8_lossy(&$input_bytes), $expected_str); + } } + + assert_decode_utf8!([], ""); + assert_decode_utf8!([0x41], "A"); + assert_decode_utf8!([0xC1, 0x81], "��"); + assert_decode_utf8!([0xE2, 0x99, 0xA5], "♥"); + assert_decode_utf8!([0xE2, 0x99, 0xA5, 0x41], "♥A"); + assert_decode_utf8!([0xE2, 0x99], "�"); + assert_decode_utf8!([0xE2, 0x99, 0x41], "�A"); + assert_decode_utf8!([0xC0], "�"); + assert_decode_utf8!([0xC0, 0x41], "�A"); + assert_decode_utf8!([0x80], "�"); + assert_decode_utf8!([0x80, 0x41], "�A"); + assert_decode_utf8!([0xFE], "�"); + assert_decode_utf8!([0xFE, 0x41], "�A"); + assert_decode_utf8!([0xFF], "�"); + assert_decode_utf8!([0xFF, 0x41], "�A"); + assert_decode_utf8!([0xC0, 0x80], "��"); + + // Surrogates + assert_decode_utf8!([0xED, 0x9F, 0xBF], "\u{D7FF}"); + assert_decode_utf8!([0xED, 0xA0, 0x80], "���"); + assert_decode_utf8!([0xED, 0xBF, 0x80], "���"); + assert_decode_utf8!([0xEE, 0x80, 0x80], "\u{E000}"); + + // char::MAX + assert_decode_utf8!([0xF4, 0x8F, 0xBF, 0xBF], "\u{10FFFF}"); + assert_decode_utf8!([0xF4, 0x8F, 0xBF, 0x41], "�A"); + assert_decode_utf8!([0xF4, 0x90, 0x80, 0x80], "����"); + + // 5 and 6 bytes sequence + // Part of the original design of UTF-8, + // but invalid now that UTF-8 is artificially restricted to match the range of UTF-16. 
+ assert_decode_utf8!([0xF8, 0x80, 0x80, 0x80, 0x80], "�����"); + assert_decode_utf8!([0xFC, 0x80, 0x80, 0x80, 0x80, 0x80], "������"); } diff --git a/src/libcoretest/hash/sip.rs b/src/libcoretest/hash/sip.rs index a5e6005545..b465d7de18 100644 --- a/src/libcoretest/hash/sip.rs +++ b/src/libcoretest/hash/sip.rs @@ -7,6 +7,9 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. + +#![allow(deprecated)] + use test::{Bencher, black_box}; use core::hash::{Hash, Hasher}; diff --git a/src/libcoretest/iter.rs b/src/libcoretest/iter.rs index a2848faa10..27eb25537f 100644 --- a/src/libcoretest/iter.rs +++ b/src/libcoretest/iter.rs @@ -664,12 +664,24 @@ fn test_max_by_key() { assert_eq!(*xs.iter().max_by_key(|x| x.abs()).unwrap(), -10); } +#[test] +fn test_max_by() { + let xs: &[isize] = &[-3, 0, 1, 5, -10]; + assert_eq!(*xs.iter().max_by(|x, y| x.abs().cmp(&y.abs())).unwrap(), -10); +} + #[test] fn test_min_by_key() { let xs: &[isize] = &[-3, 0, 1, 5, -10]; assert_eq!(*xs.iter().min_by_key(|x| x.abs()).unwrap(), 0); } +#[test] +fn test_min_by() { + let xs: &[isize] = &[-3, 0, 1, 5, -10]; + assert_eq!(*xs.iter().min_by(|x, y| x.abs().cmp(&y.abs())).unwrap(), 0); +} + #[test] fn test_by_ref() { let mut xs = 0..10; diff --git a/src/libcoretest/lib.rs b/src/libcoretest/lib.rs index 9116344c57..590bf478aa 100644 --- a/src/libcoretest/lib.rs +++ b/src/libcoretest/lib.rs @@ -32,6 +32,8 @@ #![feature(try_from)] #![feature(unicode)] #![feature(unique)] +#![feature(iter_max_by)] +#![feature(iter_min_by)] extern crate core; extern crate test; diff --git a/src/libcoretest/ptr.rs b/src/libcoretest/ptr.rs index e0a9f4e5d4..f7fe61896f 100644 --- a/src/libcoretest/ptr.rs +++ b/src/libcoretest/ptr.rs @@ -173,12 +173,16 @@ fn test_unsized_unique() { } #[test] -fn test_variadic_fnptr() { +#[allow(warnings)] +// Have a symbol for the test below. It doesn’t need to be an actual variadic function, match the +// ABI, or even point to an actual executable code, because the function itself is never invoked. +#[no_mangle] +pub fn test_variadic_fnptr() { use core::hash::{Hash, SipHasher}; - extern "C" { - fn printf(_: *const u8, ...); + extern { + fn test_variadic_fnptr(_: u64, ...) -> f64; } - let p: unsafe extern "C" fn(*const u8, ...) = printf; + let p: unsafe extern fn(u64, ...) 
-> f64 = test_variadic_fnptr; let q = p.clone(); assert_eq!(p, q); assert!(!(p < q)); diff --git a/src/libgetopts/lib.rs b/src/libgetopts/lib.rs index eda2069975..42200795bb 100644 --- a/src/libgetopts/lib.rs +++ b/src/libgetopts/lib.rs @@ -279,7 +279,7 @@ impl OptGroup { }], } } - (_, _) => panic!("something is wrong with the long-form opt"), + _ => panic!("something is wrong with the long-form opt"), } } } diff --git a/src/libgraphviz/lib.rs b/src/libgraphviz/lib.rs index 74cc498a7d..95c46ec571 100644 --- a/src/libgraphviz/lib.rs +++ b/src/libgraphviz/lib.rs @@ -295,7 +295,7 @@ #![cfg_attr(not(stage0), deny(warnings))] #![feature(str_escape)] -#![feature(question_mark)] +#![cfg_attr(stage0, feature(question_mark))] use self::LabelText::*; diff --git a/src/liblibc/.travis.yml b/src/liblibc/.travis.yml index e258303a53..e02f9ca256 100644 --- a/src/liblibc/.travis.yml +++ b/src/liblibc/.travis.yml @@ -33,7 +33,7 @@ matrix: # build documentation - os: linux env: TARGET=x86_64-unknown-linux-gnu - rust: stable + rust: nightly script: sh ci/dox.sh # stable compat @@ -55,6 +55,9 @@ matrix: - os: linux env: TARGET=x86_64-unknown-linux-musl rust: stable + - os: linux + env: TARGET=i686-unknown-linux-musl + rust: stable - os: linux env: TARGET=arm-unknown-linux-gnueabihf rust: stable @@ -70,6 +73,18 @@ matrix: - os: linux env: TARGET=x86_64-rumprun-netbsd rust: stable + - os: linux + env: TARGET=powerpc-unknown-linux-gnu + rust: stable + - os: linux + env: TARGET=powerpc64-unknown-linux-gnu + rust: stable + - os: linux + env: TARGET=mips-unknown-linux-musl + rust: stable + - os: linux + env: TARGET=mipsel-unknown-linux-musl + rust: stable # beta - os: linux @@ -104,4 +119,4 @@ matrix: notifications: email: on_success: never - webhooks: http://buildbot.rust-lang.org/homu/travis + webhooks: https://buildbot.rust-lang.org/homu/travis diff --git a/src/liblibc/Cargo.lock b/src/liblibc/Cargo.lock new file mode 100644 index 0000000000..6313e804e9 --- /dev/null +++ b/src/liblibc/Cargo.lock @@ -0,0 +1,112 @@ +[root] +name = "libc-test" +version = "0.1.0" +dependencies = [ + "ctest 0.1.0 (git+https://github.com/alexcrichton/ctest)", + "libc 0.2.16", +] + +[[package]] +name = "bitflags" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "ctest" +version = "0.1.0" +source = "git+https://github.com/alexcrichton/ctest#2839e49847a6adca6e96cc81c46a1f03f8562ac0" +dependencies = [ + "gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "gcc" +version = "0.3.35" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "generate-files" +version = "0.1.0" +dependencies = [ + "ctest 0.1.0 (git+https://github.com/alexcrichton/ctest)", +] + +[[package]] +name = "kernel32-sys" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "libc" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "libc" +version = "0.2.16" + +[[package]] +name = "log" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc-serialize" +version = "0.3.19" +source = 
"registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "syntex_syntax" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "term 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "term" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-xid" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi-build" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[metadata] +"checksum bitflags 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "32866f4d103c4e438b1db1158aa1b1a80ee078e5d77a59a2f906fd62a577389c" +"checksum ctest 0.1.0 (git+https://github.com/alexcrichton/ctest)" = "" +"checksum gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "91ecd03771effb0c968fd6950b37e89476a578aaf1c70297d8e92b6516ec3312" +"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +"checksum libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "e32a70cf75e5846d53a673923498228bbec6a8624708a9ea5645f075d6276122" +"checksum log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ab83497bf8bf4ed2a74259c1c802351fcd67a65baa86394b6ba73c36f4838054" +"checksum rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)" = "6159e4e6e559c81bd706afe9c8fd68f547d3e851ce12e76b1de7914bab61691b" +"checksum syntex_syntax 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8739e1a8b53efe7349917259f8ced15f797c89bf788a86e44f61addc0d1ecf68" +"checksum term 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "f2077e54d38055cf1ca0fd7933a2e00cd3ec8f6fed352b2a377f06dcdaaf3281" +"checksum unicode-xid 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "36dff09cafb4ec7c8cf0023eb0b686cb6ce65499116a12201c9e11840ca01beb" +"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" +"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" diff --git a/src/liblibc/Cargo.toml b/src/liblibc/Cargo.toml index 6d564434f4..5dc0ec4e4e 100644 --- a/src/liblibc/Cargo.toml +++ b/src/liblibc/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "libc" -version = "0.2.14" +version = "0.2.16" authors = ["The Rust Project Developers"] license = "MIT/Apache-2.0" readme = "README.md" @@ -16,3 +16,6 @@ other common platform libraries. 
[features] default = ["use_std"] use_std = [] + +[workspace] +members = ["libc-test", "libc-test/generate-files"] diff --git a/src/liblibc/README.md b/src/liblibc/README.md index 09b9a567b4..5ea812320f 100644 --- a/src/liblibc/README.md +++ b/src/liblibc/README.md @@ -132,4 +132,6 @@ The following may be supported, but are not guaranteed to always work: * `i686-unknown-freebsd` * [`x86_64-unknown-bitrig`](https://doc.rust-lang.org/libc/x86_64-unknown-bitrig/libc/) * [`x86_64-unknown-dragonfly`](https://doc.rust-lang.org/libc/x86_64-unknown-dragonfly/libc/) + * `i686-unknown-haiku` + * `x86_64-unknown-haiku` * [`x86_64-unknown-netbsd`](https://doc.rust-lang.org/libc/x86_64-unknown-netbsd/libc/) diff --git a/src/liblibc/appveyor.yml b/src/liblibc/appveyor.yml index 93bd0f9362..a851bb87b6 100644 --- a/src/liblibc/appveyor.yml +++ b/src/liblibc/appveyor.yml @@ -7,9 +7,9 @@ environment: - TARGET: x86_64-pc-windows-msvc - TARGET: i686-pc-windows-msvc install: - - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" - - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" - - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin + - curl -sSf -o rustup-init.exe https://win.rustup.rs/ + - rustup-init.exe -y --default-host %TARGET% + - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin - if defined MSYS2_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS2_BITS%\bin - rustc -V - cargo -V @@ -17,5 +17,9 @@ install: build: false test_script: - - cargo test - - cargo run --manifest-path libc-test/Cargo.toml + - cargo test --target %TARGET% + - cargo run --manifest-path libc-test/Cargo.toml --target %TARGET% + +cache: + - target + - C:\Users\appveyor\.cargo\registry diff --git a/src/liblibc/ci/docker/aarch64-unknown-linux-gnu/Dockerfile b/src/liblibc/ci/docker/aarch64-unknown-linux-gnu/Dockerfile index 1c7235cd0d..2ba69e1544 100644 --- a/src/liblibc/ci/docker/aarch64-unknown-linux-gnu/Dockerfile +++ b/src/liblibc/ci/docker/aarch64-unknown-linux-gnu/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:14.04 +FROM ubuntu:16.10 RUN apt-get update RUN apt-get install -y --no-install-recommends \ gcc libc6-dev ca-certificates \ diff --git a/src/liblibc/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile b/src/liblibc/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile index 3a858e3884..3824c04664 100644 --- a/src/liblibc/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile +++ b/src/liblibc/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:16.04 +FROM ubuntu:16.10 RUN apt-get update RUN apt-get install -y --no-install-recommends \ gcc libc6-dev ca-certificates \ diff --git a/src/liblibc/ci/docker/i686-unknown-linux-gnu/Dockerfile b/src/liblibc/ci/docker/i686-unknown-linux-gnu/Dockerfile index 63450ff9e0..c149d84072 100644 --- a/src/liblibc/ci/docker/i686-unknown-linux-gnu/Dockerfile +++ b/src/liblibc/ci/docker/i686-unknown-linux-gnu/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:16.04 +FROM ubuntu:16.10 RUN apt-get update RUN apt-get install -y --no-install-recommends \ gcc-multilib libc6-dev ca-certificates diff --git a/src/liblibc/ci/docker/i686-unknown-linux-musl/Dockerfile b/src/liblibc/ci/docker/i686-unknown-linux-musl/Dockerfile new file mode 100644 index 0000000000..87459a1672 --- /dev/null +++ b/src/liblibc/ci/docker/i686-unknown-linux-musl/Dockerfile @@ -0,0 +1,22 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc make libc6-dev git curl ca-certificates +# Below we're cross-compiling musl for i686 using the 
system compiler on an +# x86_64 system. This is an awkward thing to be doing and so we have to jump +# through a couple hoops to get musl to be happy. In particular: +# +# * We specifically pass -m32 in CFLAGS and override CC when running ./configure, +# since otherwise the script will fail to find a compiler. +# * We manually unset CROSS_COMPILE when running make; otherwise the makefile +# will call the non-existent binary 'i686-ar'. +RUN curl https://www.musl-libc.org/releases/musl-1.1.15.tar.gz | \ + tar xzf - && \ + cd musl-1.1.15 && \ + CC=gcc CFLAGS=-m32 ./configure --prefix=/musl-i686 --disable-shared --target=i686 && \ + make CROSS_COMPILE= install -j4 && \ + cd .. && \ + rm -rf musl-1.1.15 +ENV PATH=$PATH:/musl-i686/bin:/rust/bin \ + CC_i686_unknown_linux_musl=musl-gcc diff --git a/src/liblibc/ci/docker/mips-unknown-linux-gnu/Dockerfile b/src/liblibc/ci/docker/mips-unknown-linux-gnu/Dockerfile index 8eb6a26b00..eea1f652c3 100644 --- a/src/liblibc/ci/docker/mips-unknown-linux-gnu/Dockerfile +++ b/src/liblibc/ci/docker/mips-unknown-linux-gnu/Dockerfile @@ -1,13 +1,10 @@ -FROM ubuntu:15.10 +FROM ubuntu:16.10 RUN apt-get update RUN apt-get install -y --no-install-recommends \ - software-properties-common gcc libc6-dev qemu-user -RUN add-apt-repository ppa:angelsl/mips-cross -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc-5-mips-linux-gnu libc6-dev-mips-cross + gcc libc6-dev qemu-user ca-certificates \ + gcc-mips-linux-gnu libc6-dev-mips-cross \ + qemu-system-mips -ENV CARGO_TARGET_MIPS_UNKNOWN_LINUX_GNU_LINKER=mips-linux-gnu-gcc-5 \ - CC_mips_unknown_linux_gnu=mips-linux-gnu-gcc-5 \ +ENV CARGO_TARGET_MIPS_UNKNOWN_LINUX_GNU_LINKER=mips-linux-gnu-gcc \ PATH=$PATH:/rust/bin diff --git a/src/liblibc/ci/docker/mips-unknown-linux-musl/Dockerfile b/src/liblibc/ci/docker/mips-unknown-linux-musl/Dockerfile new file mode 100644 index 0000000000..77c6adb435 --- /dev/null +++ b/src/liblibc/ci/docker/mips-unknown-linux-musl/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu-user ca-certificates qemu-system-mips curl \ + bzip2 + +RUN mkdir /toolchain +RUN curl -L https://downloads.openwrt.org/snapshots/trunk/ar71xx/generic/OpenWrt-SDK-ar71xx-generic_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 | \ + tar xjf - -C /toolchain --strip-components=1 + +ENV PATH=$PATH:/rust/bin:/toolchain/staging_dir/toolchain-mips_34kc_gcc-5.3.0_musl-1.1.15/bin \ + CC_mips_unknown_linux_musl=mips-openwrt-linux-gcc \ + CARGO_TARGET_MIPS_UNKNOWN_LINUX_MUSL_LINKER=mips-openwrt-linux-gcc diff --git a/src/liblibc/ci/docker/mipsel-unknown-linux-musl/Dockerfile b/src/liblibc/ci/docker/mipsel-unknown-linux-musl/Dockerfile new file mode 100644 index 0000000000..36c4d90ef6 --- /dev/null +++ b/src/liblibc/ci/docker/mipsel-unknown-linux-musl/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu-user ca-certificates qemu-system-mips curl \ + bzip2 + +RUN mkdir /toolchain +RUN curl -L https://downloads.openwrt.org/snapshots/trunk/malta/generic/OpenWrt-Toolchain-malta-le_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 | \ + tar xjf - -C /toolchain --strip-components=2 + +ENV PATH=$PATH:/rust/bin:/toolchain/bin \ + CC_mipsel_unknown_linux_musl=mipsel-openwrt-linux-gcc \ + CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_MUSL_LINKER=mipsel-openwrt-linux-gcc diff --git a/src/liblibc/ci/docker/powerpc-unknown-linux-gnu/Dockerfile 
b/src/liblibc/ci/docker/powerpc-unknown-linux-gnu/Dockerfile new file mode 100644 index 0000000000..d9d7db0f41 --- /dev/null +++ b/src/liblibc/ci/docker/powerpc-unknown-linux-gnu/Dockerfile @@ -0,0 +1,10 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu-user ca-certificates \ + gcc-powerpc-linux-gnu libc6-dev-powerpc-cross \ + qemu-system-ppc + +ENV CARGO_TARGET_POWERPC_UNKNOWN_LINUX_GNU_LINKER=powerpc-linux-gnu-gcc \ + PATH=$PATH:/rust/bin diff --git a/src/liblibc/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile b/src/liblibc/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile new file mode 100644 index 0000000000..df0e6057b4 --- /dev/null +++ b/src/liblibc/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile @@ -0,0 +1,11 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu-user ca-certificates \ + gcc-powerpc64-linux-gnu libc6-dev-ppc64-cross \ + qemu-system-ppc + +ENV CARGO_TARGET_POWERPC64_UNKNOWN_LINUX_GNU_LINKER=powerpc64-linux-gnu-gcc \ + CC=powerpc64-linux-gnu-gcc \ + PATH=$PATH:/rust/bin diff --git a/src/liblibc/ci/docker/x86_64-unknown-freebsd/Dockerfile b/src/liblibc/ci/docker/x86_64-unknown-freebsd/Dockerfile index bffcaa0ced..b127338222 100644 --- a/src/liblibc/ci/docker/x86_64-unknown-freebsd/Dockerfile +++ b/src/liblibc/ci/docker/x86_64-unknown-freebsd/Dockerfile @@ -3,11 +3,11 @@ USER root RUN apt-get update RUN apt-get install -y --no-install-recommends \ - qemu qemu-kvm kmod cpu-checker + qemu genext2fs ENTRYPOINT ["sh"] ENV PATH=$PATH:/rust/bin \ - QEMU=freebsd.qcow2 \ + QEMU=freebsd.qcow2.gz \ CAN_CROSS=1 \ CARGO_TARGET_X86_64_UNKNOWN_FREEBSD_LINKER=x86_64-unknown-freebsd10-gcc diff --git a/src/liblibc/ci/docker/x86_64-unknown-linux-gnu/Dockerfile b/src/liblibc/ci/docker/x86_64-unknown-linux-gnu/Dockerfile index 294a0621ce..4af3f834cb 100644 --- a/src/liblibc/ci/docker/x86_64-unknown-linux-gnu/Dockerfile +++ b/src/liblibc/ci/docker/x86_64-unknown-linux-gnu/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:16.04 +FROM ubuntu:16.10 RUN apt-get update RUN apt-get install -y --no-install-recommends \ gcc libc6-dev ca-certificates diff --git a/src/liblibc/ci/docker/x86_64-unknown-linux-musl/Dockerfile b/src/liblibc/ci/docker/x86_64-unknown-linux-musl/Dockerfile index f44003806e..9c2499948a 100644 --- a/src/liblibc/ci/docker/x86_64-unknown-linux-musl/Dockerfile +++ b/src/liblibc/ci/docker/x86_64-unknown-linux-musl/Dockerfile @@ -1,13 +1,13 @@ -FROM ubuntu:16.04 +FROM ubuntu:16.10 RUN apt-get update RUN apt-get install -y --no-install-recommends \ gcc make libc6-dev git curl ca-certificates -RUN curl https://www.musl-libc.org/releases/musl-1.1.14.tar.gz | \ +RUN curl https://www.musl-libc.org/releases/musl-1.1.15.tar.gz | \ tar xzf - && \ - cd musl-1.1.14 && \ + cd musl-1.1.15 && \ ./configure --prefix=/musl-x86_64 && \ make install -j4 && \ cd .. 
&& \ - rm -rf musl-1.1.14 + rm -rf musl-1.1.15 ENV PATH=$PATH:/musl-x86_64/bin:/rust/bin diff --git a/src/liblibc/ci/docker/x86_64-unknown-openbsd/Dockerfile b/src/liblibc/ci/docker/x86_64-unknown-openbsd/Dockerfile index f0343c1361..26340a5ed1 100644 --- a/src/liblibc/ci/docker/x86_64-unknown-openbsd/Dockerfile +++ b/src/liblibc/ci/docker/x86_64-unknown-openbsd/Dockerfile @@ -1,7 +1,8 @@ -FROM ubuntu:16.04 +FROM ubuntu:16.10 RUN apt-get update RUN apt-get install -y --no-install-recommends \ - gcc libc6-dev qemu qemu-kvm curl ca-certificates kmod cpu-checker + gcc libc6-dev qemu curl ca-certificates \ + genext2fs ENV PATH=$PATH:/rust/bin \ - QEMU=openbsd.qcow2 + QEMU=2016-09-07/openbsd-6.0-without-pkgs.qcow2 diff --git a/src/liblibc/ci/run-docker.sh b/src/liblibc/ci/run-docker.sh index 5ad90652f0..e34e65ffcd 100644 --- a/src/liblibc/ci/run-docker.sh +++ b/src/liblibc/ci/run-docker.sh @@ -7,6 +7,7 @@ run() { echo $1 docker build -t libc ci/docker/$1 docker run \ + --rm \ -v `rustc --print sysroot`:/rust:ro \ -v `pwd`:/checkout:ro \ -e CARGO_TARGET_DIR=/tmp/target \ diff --git a/src/liblibc/ci/run-qemu.sh b/src/liblibc/ci/run-qemu.sh index 70f312e3bd..b2f457df91 100644 --- a/src/liblibc/ci/run-qemu.sh +++ b/src/liblibc/ci/run-qemu.sh @@ -19,10 +19,7 @@ export CARGO_TARGET_DIR=/tmp case $TARGET in *-openbsd) - pkg_add rust curl gcc-4.8.4p4 - curl https://static.rust-lang.org/cargo-dist/2015-04-02/cargo-nightly-x86_64-unknown-openbsd.tar.gz | \ - tar xzf - -C /tmp - export PATH=$PATH:/tmp/cargo-nightly-x86_64-unknown-openbsd/cargo/bin + pkg_add cargo gcc%4.9 rust export CC=egcc ;; diff --git a/src/liblibc/ci/run.sh b/src/liblibc/ci/run.sh index aea1de8dfe..760353bf57 100755 --- a/src/liblibc/ci/run.sh +++ b/src/liblibc/ci/run.sh @@ -16,20 +16,28 @@ TARGET=$1 if [ "$QEMU" != "" ]; then tmpdir=/tmp/qemu-img-creation mkdir -p $tmpdir - if [ ! -f $tmpdir/$QEMU ]; then - curl https://people.mozilla.org/~acrichton/libc-test/qemu/$QEMU.gz | \ - gunzip -d > $tmpdir/$QEMU + + if [ -z "${QEMU#*.gz}" ]; then + # image is .gz : download and uncompress it + qemufile=$(echo ${QEMU%.gz} | sed 's/\//__/g') + if [ ! -f $tmpdir/$qemufile ]; then + curl https://people.mozilla.org/~acrichton/libc-test/qemu/$QEMU | \ + gunzip -d > $tmpdir/$qemufile + fi + else + # plain qcow2 image: just download it + qemufile=$(echo ${QEMU} | sed 's/\//__/g') + if [ ! -f $tmpdir/$qemufile ]; then + curl https://people.mozilla.org/~acrichton/libc-test/qemu/$QEMU \ + > $tmpdir/$qemufile + fi fi # Create a mount a fresh new filesystem image that we'll later pass to QEMU. # This will have a `run.sh` script will which use the artifacts inside to run # on the host. rm -f $tmpdir/libc-test.img - dd if=/dev/null of=$tmpdir/libc-test.img bs=1M seek=50 - mkfs.ext2 -F $tmpdir/libc-test.img - rm -rf $tmpdir/mount mkdir $tmpdir/mount - mount -t ext2 -o loop $tmpdir/libc-test.img $tmpdir/mount # If we have a cross compiler, then we just do the standard rigamarole of # cross-compiling an executable and then the script to run just executes the @@ -62,25 +70,21 @@ if [ "$QEMU" != "" ]; then cp libc-test/run-generated-Cargo.toml $tmpdir/mount/libc/libc-test/Cargo.toml fi - umount $tmpdir/mount - - # If we can use kvm, prefer that, otherwise just fall back to user-space - # emulation. 
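
The ci/run.sh hunk above picks between the gzip-compressed and the plain qcow2 download paths with the POSIX parameter expansion `${QEMU#*.gz}`, which strips the shortest prefix matching `*.gz`; for the image names used in this patch the result is empty exactly when the file name ends in `.gz`. A minimal sketch of the same idiom, using image names from this patch purely for illustration:

    #!/bin/sh
    # Illustrative demo of the suffix test introduced in ci/run.sh.
    for img in freebsd.qcow2.gz 2016-09-07/openbsd-6.0-without-pkgs.qcow2; do
        if [ -z "${img#*.gz}" ]; then
            # "${img#*.gz}" removes the shortest prefix matching "*.gz"; it is
            # empty only when the first ".gz" sits at the very end of the name.
            echo "$img: compressed image, pipe through gunzip after download"
        else
            echo "$img: plain qcow2 image, download as-is"
        fi
    done
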
- if kvm-ok; then - program=kvm - else - program=qemu-system-x86_64 - fi + du -sh $tmpdir/mount + genext2fs \ + --root $tmpdir/mount \ + --size-in-blocks 100000 \ + $tmpdir/libc-test.img # Pass -snapshot to prevent tampering with the disk images, this helps when # running this script in development. The two drives are then passed next, # first is the OS and second is the one we just made. Next the network is # configured to work (I'm not entirely sure how), and then finally we turn off # graphics and redirect the serial console output to out.log. - $program \ + qemu-system-x86_64 \ -m 1024 \ -snapshot \ - -drive if=virtio,file=$tmpdir/$QEMU \ + -drive if=virtio,file=$tmpdir/$qemufile \ -drive if=virtio,file=$tmpdir/libc-test.img \ -net nic,model=virtio \ -net user \ @@ -117,6 +121,23 @@ case "$TARGET" in qemu-mips -L /usr/mips-linux-gnu $CARGO_TARGET_DIR/$TARGET/debug/libc-test ;; + mips-unknown-linux-musl) + qemu-mips -L /toolchain/staging_dir/toolchain-mips_34kc_gcc-5.3.0_musl-1.1.15 \ + $CARGO_TARGET_DIR/$TARGET/debug/libc-test + ;; + + mipsel-unknown-linux-musl) + qemu-mipsel -L /toolchain $CARGO_TARGET_DIR/$TARGET/debug/libc-test + ;; + + powerpc-unknown-linux-gnu) + qemu-ppc -L /usr/powerpc-linux-gnu $CARGO_TARGET_DIR/$TARGET/debug/libc-test + ;; + + powerpc64-unknown-linux-gnu) + qemu-ppc64 -L /usr/powerpc64-linux-gnu $CARGO_TARGET_DIR/$TARGET/debug/libc-test + ;; + aarch64-unknown-linux-gnu) qemu-aarch64 -L /usr/aarch64-linux-gnu/ $CARGO_TARGET_DIR/$TARGET/debug/libc-test ;; diff --git a/src/liblibc/libc-test/Cargo.lock b/src/liblibc/libc-test/Cargo.lock deleted file mode 100644 index 3b71c683eb..0000000000 --- a/src/liblibc/libc-test/Cargo.lock +++ /dev/null @@ -1,92 +0,0 @@ -[root] -name = "libc-test" -version = "0.1.0" -dependencies = [ - "ctest 0.1.0 (git+https://github.com/alexcrichton/ctest)", - "libc 0.2.14", -] - -[[package]] -name = "bitflags" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "ctest" -version = "0.1.0" -source = "git+https://github.com/alexcrichton/ctest#a6becb6d7fd23d9863cba86eac31d1ffc4082734" -dependencies = [ - "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex_syntax 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "gcc" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "libc" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "libc" -version = "0.2.14" - -[[package]] -name = "log" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "rustc-serialize" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "syntex_syntax" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.19 
(registry+https://github.com/rust-lang/crates.io-index)", - "term 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "term" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "unicode-xid" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - diff --git a/src/liblibc/libc-test/Cargo.toml b/src/liblibc/libc-test/Cargo.toml index 9ff476579f..dfcf127c48 100644 --- a/src/liblibc/libc-test/Cargo.toml +++ b/src/liblibc/libc-test/Cargo.toml @@ -9,3 +9,6 @@ libc = { path = ".." } [build-dependencies] ctest = { git = "https://github.com/alexcrichton/ctest" } + +[replace] +"gcc:0.3.35" = { git = "https://github.com/alexcrichton/gcc-rs" } diff --git a/src/liblibc/libc-test/build-generated.rs b/src/liblibc/libc-test/build-generated.rs index 41b562723f..a51c0e379b 100644 --- a/src/liblibc/libc-test/build-generated.rs +++ b/src/liblibc/libc-test/build-generated.rs @@ -10,6 +10,7 @@ fn main() { .flag("-Wall") .flag("-Wextra") .flag("-Werror") + .flag("-Wno-deprecated-declarations") .flag("-Wno-type-limits") .compile("liball.a"); } diff --git a/src/liblibc/libc-test/build.rs b/src/liblibc/libc-test/build.rs index a0b16f234a..bbd9f8cfcf 100644 --- a/src/liblibc/libc-test/build.rs +++ b/src/liblibc/libc-test/build.rs @@ -64,6 +64,8 @@ fn main() { cfg.header("ws2tcpip.h"); } } else { + cfg.flag("-Wno-deprecated-declarations"); + cfg.header("ctype.h"); cfg.header("dirent.h"); if openbsd { @@ -105,6 +107,7 @@ fn main() { cfg.header("arpa/inet.h"); cfg.header("time64.h"); cfg.header("xlocale.h"); + cfg.header("utmp.h"); } else if !windows { cfg.header("glob.h"); cfg.header("ifaddrs.h"); @@ -122,6 +125,12 @@ fn main() { cfg.header("execinfo.h"); cfg.header("xlocale.h"); } + + if openbsd { + cfg.header("utmp.h"); + } else { + cfg.header("utmpx.h"); + } } } @@ -151,8 +160,10 @@ fn main() { cfg.header("sys/signalfd.h"); cfg.header("sys/xattr.h"); cfg.header("sys/ipc.h"); + cfg.header("sys/msg.h"); cfg.header("sys/shm.h"); cfg.header("pty.h"); + cfg.header("shadow.h"); } if linux || android { @@ -274,6 +285,11 @@ fn main() { match ty { "sockaddr_nl" => musl, + // On Linux, the type of `ut_tv` field of `struct utmpx` + // can be an anonymous struct, so an extra struct, + // which is absent in glibc, has to be defined. + "__timeval" if linux => true, + // The alignment of this is 4 on 64-bit OSX... "kevent" if apple && x86_64 => true, @@ -318,7 +334,8 @@ fn main() { // kernel regardless "RLIMIT_NLIMITS" | "TCP_COOKIE_TRANSACTIONS" | - "RLIMIT_RTTIME" if musl => true, + "RLIMIT_RTTIME" | + "MSG_COPY" if musl => true, // work around super old mips toolchain "SCHED_IDLE" | "SHM_NORESERVE" => mips, @@ -422,7 +439,9 @@ fn main() { // This is a weird union, don't check the type. 
(struct_ == "ifaddrs" && field == "ifa_ifu") || // sighandler_t type is super weird - (struct_ == "sigaction" && field == "sa_sigaction") + (struct_ == "sigaction" && field == "sa_sigaction") || + // __timeval type is a patch which doesn't exist in glibc + (linux && struct_ == "utmpx" && field == "ut_tv") }); cfg.skip_field(move |struct_, field| { diff --git a/src/liblibc/libc-test/generate-files/Cargo.lock b/src/liblibc/libc-test/generate-files/Cargo.lock deleted file mode 100644 index 7b23e556c6..0000000000 --- a/src/liblibc/libc-test/generate-files/Cargo.lock +++ /dev/null @@ -1,108 +0,0 @@ -[root] -name = "generate-files" -version = "0.1.0" -dependencies = [ - "ctest 0.1.0 (git+https://github.com/alexcrichton/ctest)", -] - -[[package]] -name = "advapi32-sys" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "bitflags" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "ctest" -version = "0.1.0" -source = "git+https://github.com/alexcrichton/ctest#7703b51086cce2d9a703b103d0695b36653b8cab" -dependencies = [ - "gcc 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex_syntax 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "gcc" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "advapi32-sys 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "kernel32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "libc" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "libc" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "log" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rustc-serialize" -version = "0.3.16" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "syntex_syntax" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", - "term 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "term" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "unicode-xid" 
-version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - diff --git a/src/liblibc/src/lib.rs b/src/liblibc/src/lib.rs index 9c665f8728..c25daa5de0 100644 --- a/src/liblibc/src/lib.rs +++ b/src/liblibc/src/lib.rs @@ -33,6 +33,9 @@ #![cfg_attr(all(target_os = "linux", target_arch = "aarch64"), doc( html_root_url = "https://doc.rust-lang.org/libc/aarch64-unknown-linux-gnu" ))] +#![cfg_attr(all(target_os = "linux", target_arch = "s390x"), doc( + html_root_url = "https://doc.rust-lang.org/libc/s390x-unknown-linux-gnu" +))] #![cfg_attr(all(target_os = "linux", target_env = "musl"), doc( html_root_url = "https://doc.rust-lang.org/libc/x86_64-unknown-linux-musl" ))] @@ -166,6 +169,8 @@ extern { mode: c_int, size: size_t) -> c_int; pub fn setbuf(stream: *mut FILE, buf: *mut c_char); + pub fn getchar() -> c_int; + pub fn putchar(c: c_int) -> c_int; pub fn fgetc(stream: *mut FILE) -> c_int; pub fn fgets(buf: *mut c_char, n: c_int, stream: *mut FILE) -> *mut c_char; pub fn fputc(c: c_int, stream: *mut FILE) -> c_int; @@ -241,8 +246,11 @@ extern { pub fn strxfrm(s: *mut c_char, ct: *const c_char, n: size_t) -> size_t; pub fn wcslen(buf: *const wchar_t) -> size_t; - pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; pub fn memchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; + pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; + pub fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; + pub fn memmove(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; + pub fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void; } // These are all inline functions on android, so they end up just being entirely diff --git a/src/liblibc/src/unix/bsd/apple/mod.rs b/src/liblibc/src/unix/bsd/apple/mod.rs index 967be9417f..aa4d1ac745 100644 --- a/src/liblibc/src/unix/bsd/apple/mod.rs +++ b/src/liblibc/src/unix/bsd/apple/mod.rs @@ -25,6 +25,17 @@ pub type sem_t = ::c_int; pub enum timezone {} s! { + pub struct utmpx { + pub ut_user: [::c_char; _UTX_USERSIZE], + pub ut_id: [::c_char; _UTX_IDSIZE], + pub ut_line: [::c_char; _UTX_LINESIZE], + pub ut_pid: ::pid_t, + pub ut_type: ::c_short, + pub ut_tv: ::timeval, + pub ut_host: [::c_char; _UTX_HOSTSIZE], + ut_pad: [::uint32_t; 16], + } + pub struct glob_t { pub gl_pathc: ::size_t, __unused1: ::c_int, @@ -294,6 +305,24 @@ s! 
{ } } +pub const _UTX_USERSIZE: usize = 256; +pub const _UTX_LINESIZE: usize = 32; +pub const _UTX_IDSIZE: usize = 4; +pub const _UTX_HOSTSIZE: usize = 256; + +pub const EMPTY: ::c_short = 0; +pub const RUN_LVL: ::c_short = 1; +pub const BOOT_TIME: ::c_short = 2; +pub const OLD_TIME: ::c_short = 3; +pub const NEW_TIME: ::c_short = 4; +pub const INIT_PROCESS: ::c_short = 5; +pub const LOGIN_PROCESS: ::c_short = 6; +pub const USER_PROCESS: ::c_short = 7; +pub const DEAD_PROCESS: ::c_short = 8; +pub const ACCOUNTING: ::c_short = 9; +pub const SIGNATURE: ::c_short = 10; +pub const SHUTDOWN_TIME: ::c_short = 11; + pub const LC_COLLATE_MASK: ::c_int = (1 << 0); pub const LC_CTYPE_MASK: ::c_int = (1 << 1); pub const LC_MESSAGES_MASK: ::c_int = (1 << 2); @@ -592,6 +621,8 @@ pub const EOWNERDEAD: ::c_int = 105; pub const EQFULL: ::c_int = 106; pub const ELAST: ::c_int = 106; +pub const EAI_SYSTEM: ::c_int = 11; + pub const F_DUPFD: ::c_int = 0; pub const F_DUPFD_CLOEXEC: ::c_int = 67; pub const F_GETFD: ::c_int = 1; @@ -972,46 +1003,78 @@ pub const FD_SETSIZE: usize = 1024; pub const ST_NOSUID: ::c_ulong = 2; -pub const EVFILT_AIO: ::int16_t = 0xfffd; -pub const EVFILT_PROC: ::int16_t = 0xfffb; -pub const EVFILT_READ: ::int16_t = 0xffff; -pub const EVFILT_SIGNAL: ::int16_t = 0xfffa; -pub const EVFILT_SYSCOUNT: ::int16_t = 0xe; -pub const EVFILT_TIMER: ::int16_t = 0xfff9; -pub const EVFILT_VNODE: ::int16_t = 0xfffc; -pub const EVFILT_WRITE: ::int16_t = 0xfffe; -pub const EVFILT_FS: ::int16_t = 0xfff7; -pub const EVFILT_MACHPORT: ::int16_t = 0xfff8; -pub const EVFILT_USER: ::int16_t = 0xfff6; -pub const EVFILT_VM: ::int16_t = 0xfff4; - +pub const EVFILT_READ: ::int16_t = -1; +pub const EVFILT_WRITE: ::int16_t = -2; +pub const EVFILT_AIO: ::int16_t = -3; +pub const EVFILT_VNODE: ::int16_t = -4; +pub const EVFILT_PROC: ::int16_t = -5; +pub const EVFILT_SIGNAL: ::int16_t = -6; +pub const EVFILT_TIMER: ::int16_t = -7; +pub const EVFILT_MACHPORT: ::int16_t = -8; +pub const EVFILT_FS: ::int16_t = -9; +pub const EVFILT_USER: ::int16_t = -10; +pub const EVFILT_VM: ::int16_t = -12; + +pub const EV_ADD: ::uint16_t = 0x1; +pub const EV_DELETE: ::uint16_t = 0x2; +pub const EV_ENABLE: ::uint16_t = 0x4; +pub const EV_DISABLE: ::uint16_t = 0x8; +pub const EV_ONESHOT: ::uint16_t = 0x10; +pub const EV_CLEAR: ::uint16_t = 0x20; +pub const EV_RECEIPT: ::uint16_t = 0x40; pub const EV_DISPATCH: ::uint16_t = 0x80; pub const EV_FLAG0: ::uint16_t = 0x1000; -pub const EV_OOBAND: ::uint16_t = 0x2000; pub const EV_POLL: ::uint16_t = 0x1000; -pub const EV_RECEIPT: ::uint16_t = 0x40; +pub const EV_FLAG1: ::uint16_t = 0x2000; +pub const EV_OOBAND: ::uint16_t = 0x2000; +pub const EV_ERROR: ::uint16_t = 0x4000; +pub const EV_EOF: ::uint16_t = 0x8000; +pub const EV_SYSFLAGS: ::uint16_t = 0xf000; -pub const NOTE_ABSOLUTE: ::uint32_t = 0x8; -pub const NOTE_EXITSTATUS: ::uint32_t = 0x04000000; -pub const NOTE_EXIT_REPARENTED: ::uint32_t = 0x00080000; +pub const NOTE_TRIGGER: ::uint32_t = 0x01000000; +pub const NOTE_FFNOP: ::uint32_t = 0x00000000; pub const NOTE_FFAND: ::uint32_t = 0x40000000; +pub const NOTE_FFOR: ::uint32_t = 0x80000000; pub const NOTE_FFCOPY: ::uint32_t = 0xc0000000; pub const NOTE_FFCTRLMASK: ::uint32_t = 0xc0000000; pub const NOTE_FFLAGSMASK: ::uint32_t = 0x00ffffff; -pub const NOTE_FFNOP: ::uint32_t = 0x0; -pub const NOTE_FFOR: ::uint32_t = 0x80000000; -pub const NOTE_NONE: ::uint32_t = 0x80; -pub const NOTE_NSECONDS: ::uint32_t = 0x4; +pub const NOTE_LOWAT: ::uint32_t = 0x00000001; +pub const NOTE_DELETE: 
::uint32_t = 0x00000001; +pub const NOTE_WRITE: ::uint32_t = 0x00000002; +pub const NOTE_EXTEND: ::uint32_t = 0x00000004; +pub const NOTE_ATTRIB: ::uint32_t = 0x00000008; +pub const NOTE_LINK: ::uint32_t = 0x00000010; +pub const NOTE_RENAME: ::uint32_t = 0x00000020; +pub const NOTE_REVOKE: ::uint32_t = 0x00000040; +pub const NOTE_NONE: ::uint32_t = 0x00000080; +pub const NOTE_EXIT: ::uint32_t = 0x80000000; +pub const NOTE_FORK: ::uint32_t = 0x40000000; +pub const NOTE_EXEC: ::uint32_t = 0x20000000; pub const NOTE_REAP: ::uint32_t = 0x10000000; -pub const NOTE_SECONDS: ::uint32_t = 0x1; -pub const NOTE_SIGNAL: ::uint32_t = 0x8000000; -pub const NOTE_TRIGGER: ::uint32_t = 0x01000000; -pub const NOTE_USECONDS: ::uint32_t = 0x2; -pub const NOTE_VM_ERROR: ::uint32_t = 0x10000000; +pub const NOTE_SIGNAL: ::uint32_t = 0x08000000; +pub const NOTE_EXITSTATUS: ::uint32_t = 0x04000000; +pub const NOTE_EXIT_DETAIL: ::uint32_t = 0x02000000; +pub const NOTE_PDATAMASK: ::uint32_t = 0x000fffff; +pub const NOTE_PCTRLMASK: ::uint32_t = 0xfff00000; +pub const NOTE_EXIT_REPARENTED: ::uint32_t = 0x00080000; +pub const NOTE_EXIT_DETAIL_MASK: ::uint32_t = 0x00070000; +pub const NOTE_EXIT_DECRYPTFAIL: ::uint32_t = 0x00010000; +pub const NOTE_EXIT_MEMORY: ::uint32_t = 0x00020000; +pub const NOTE_EXIT_CSERROR: ::uint32_t = 0x00040000; pub const NOTE_VM_PRESSURE: ::uint32_t = 0x80000000; -pub const NOTE_VM_PRESSURE_SUDDEN_TERMINATE: ::uint32_t = 0x20000000; pub const NOTE_VM_PRESSURE_TERMINATE: ::uint32_t = 0x40000000; -pub const NOTE_PCTRLMASK: ::uint32_t = 0xfff00000; +pub const NOTE_VM_PRESSURE_SUDDEN_TERMINATE: ::uint32_t = 0x20000000; +pub const NOTE_VM_ERROR: ::uint32_t = 0x10000000; +pub const NOTE_SECONDS: ::uint32_t = 0x00000001; +pub const NOTE_USECONDS: ::uint32_t = 0x00000002; +pub const NOTE_NSECONDS: ::uint32_t = 0x00000004; +pub const NOTE_ABSOLUTE: ::uint32_t = 0x00000008; +pub const NOTE_LEEWAY: ::uint32_t = 0x00000010; +pub const NOTE_CRITICAL: ::uint32_t = 0x00000020; +pub const NOTE_BACKGROUND: ::uint32_t = 0x00000040; +pub const NOTE_TRACK: ::uint32_t = 0x00000001; +pub const NOTE_TRACKERR: ::uint32_t = 0x00000002; +pub const NOTE_CHILD: ::uint32_t = 0x00000004; pub const NL0: ::c_int = 0x00000000; pub const NL1: ::c_int = 0x00000100; @@ -1256,6 +1319,11 @@ pub const CTL_DEBUG_NAME: ::c_int = 0; pub const CTL_DEBUG_VALUE: ::c_int = 1; pub const CTL_DEBUG_MAXID: ::c_int = 20; +pub const POLLRDNORM: ::c_short = 0x040; +pub const POLLWRNORM: ::c_short = 0x004; +pub const POLLRDBAND: ::c_short = 0x080; +pub const POLLWRBAND: ::c_short = 0x100; + pub const PRIO_DARWIN_THREAD: ::c_int = 3; pub const PRIO_DARWIN_PROCESS: ::c_int = 4; pub const PRIO_DARWIN_BG: ::c_int = 0x1000; @@ -1286,6 +1354,16 @@ f! 
{ } extern { + pub fn lutimes(file: *const ::c_char, times: *const ::timeval) -> ::c_int; + + pub fn getutxent() -> *mut utmpx; + pub fn getutxid(ut: *const utmpx) -> *mut utmpx; + pub fn getutxline(ut: *const utmpx) -> *mut utmpx; + pub fn pututxline(ut: *const utmpx) -> *mut utmpx; + pub fn setutxent(); + pub fn endutxent(); + pub fn utmpxname(file: *const ::c_char) -> ::c_int; + pub fn getnameinfo(sa: *const ::sockaddr, salen: ::socklen_t, host: *mut ::c_char, diff --git a/src/liblibc/src/unix/bsd/freebsdlike/dragonfly/mod.rs b/src/liblibc/src/unix/bsd/freebsdlike/dragonfly/mod.rs index 6d31ad8db8..179cd913b3 100644 --- a/src/liblibc/src/unix/bsd/freebsdlike/dragonfly/mod.rs +++ b/src/liblibc/src/unix/bsd/freebsdlike/dragonfly/mod.rs @@ -243,6 +243,58 @@ pub const CTL_P1003_1B_SIGQUEUE_MAX: ::c_int = 24; pub const CTL_P1003_1B_TIMER_MAX: ::c_int = 25; pub const CTL_P1003_1B_MAXID: ::c_int = 26; +pub const EVFILT_READ: ::int16_t = -1; +pub const EVFILT_WRITE: ::int16_t = -2; +pub const EVFILT_AIO: ::int16_t = -3; +pub const EVFILT_VNODE: ::int16_t = -4; +pub const EVFILT_PROC: ::int16_t = -5; +pub const EVFILT_SIGNAL: ::int16_t = -6; +pub const EVFILT_TIMER: ::int16_t = -7; +pub const EVFILT_PROCDESC: ::int16_t = -8; +pub const EVFILT_USER: ::int16_t = -9; +pub const EVFILT_FS: ::int16_t = -10; + +pub const EV_ADD: ::uint16_t = 0x1; +pub const EV_DELETE: ::uint16_t = 0x2; +pub const EV_ENABLE: ::uint16_t = 0x4; +pub const EV_DISABLE: ::uint16_t = 0x8; +pub const EV_ONESHOT: ::uint16_t = 0x10; +pub const EV_CLEAR: ::uint16_t = 0x20; +pub const EV_RECEIPT: ::uint16_t = 0x40; +pub const EV_DISPATCH: ::uint16_t = 0x80; +pub const EV_NODATA: ::uint16_t = 0x1000; +pub const EV_FLAG1: ::uint16_t = 0x2000; +pub const EV_ERROR: ::uint16_t = 0x4000; +pub const EV_EOF: ::uint16_t = 0x8000; +pub const EV_SYSFLAGS: ::uint16_t = 0xf000; + +pub const NOTE_TRIGGER: ::uint32_t = 0x01000000; +pub const NOTE_FFNOP: ::uint32_t = 0x00000000; +pub const NOTE_FFAND: ::uint32_t = 0x40000000; +pub const NOTE_FFOR: ::uint32_t = 0x80000000; +pub const NOTE_FFCOPY: ::uint32_t = 0xc0000000; +pub const NOTE_FFCTRLMASK: ::uint32_t = 0xc0000000; +pub const NOTE_FFLAGSMASK: ::uint32_t = 0x00ffffff; +pub const NOTE_LOWAT: ::uint32_t = 0x00000001; +pub const NOTE_OOB: ::uint32_t = 0x00000002; +pub const NOTE_DELETE: ::uint32_t = 0x00000001; +pub const NOTE_WRITE: ::uint32_t = 0x00000002; +pub const NOTE_EXTEND: ::uint32_t = 0x00000004; +pub const NOTE_ATTRIB: ::uint32_t = 0x00000008; +pub const NOTE_LINK: ::uint32_t = 0x00000010; +pub const NOTE_RENAME: ::uint32_t = 0x00000020; +pub const NOTE_REVOKE: ::uint32_t = 0x00000040; +pub const NOTE_EXIT: ::uint32_t = 0x80000000; +pub const NOTE_FORK: ::uint32_t = 0x40000000; +pub const NOTE_EXEC: ::uint32_t = 0x20000000; +pub const NOTE_PDATAMASK: ::uint32_t = 0x000fffff; +pub const NOTE_PCTRLMASK: ::uint32_t = 0xf0000000; +pub const NOTE_TRACK: ::uint32_t = 0x00000001; +pub const NOTE_TRACKERR: ::uint32_t = 0x00000002; +pub const NOTE_CHILD: ::uint32_t = 0x00000004; + +pub const MSG_NOSIGNAL: ::uint32_t = 0x400; + extern { pub fn mprotect(addr: *mut ::c_void, len: ::size_t, prot: ::c_int) -> ::c_int; diff --git a/src/liblibc/src/unix/bsd/freebsdlike/freebsd/mod.rs b/src/liblibc/src/unix/bsd/freebsdlike/freebsd/mod.rs index 1596a93cf8..a89440ebde 100644 --- a/src/liblibc/src/unix/bsd/freebsdlike/freebsd/mod.rs +++ b/src/liblibc/src/unix/bsd/freebsdlike/freebsd/mod.rs @@ -57,6 +57,59 @@ pub const POSIX_FADV_WILLNEED: ::c_int = 3; pub const POSIX_FADV_DONTNEED: ::c_int = 4; pub 
const POSIX_FADV_NOREUSE: ::c_int = 5; +pub const EVFILT_READ: ::int16_t = -1; +pub const EVFILT_WRITE: ::int16_t = -2; +pub const EVFILT_AIO: ::int16_t = -3; +pub const EVFILT_VNODE: ::int16_t = -4; +pub const EVFILT_PROC: ::int16_t = -5; +pub const EVFILT_SIGNAL: ::int16_t = -6; +pub const EVFILT_TIMER: ::int16_t = -7; +pub const EVFILT_FS: ::int16_t = -9; +pub const EVFILT_LIO: ::int16_t = -10; +pub const EVFILT_USER: ::int16_t = -11; + +pub const EV_ADD: ::uint16_t = 0x1; +pub const EV_DELETE: ::uint16_t = 0x2; +pub const EV_ENABLE: ::uint16_t = 0x4; +pub const EV_DISABLE: ::uint16_t = 0x8; +pub const EV_ONESHOT: ::uint16_t = 0x10; +pub const EV_CLEAR: ::uint16_t = 0x20; +pub const EV_RECEIPT: ::uint16_t = 0x40; +pub const EV_DISPATCH: ::uint16_t = 0x80; +pub const EV_DROP: ::uint16_t = 0x1000; +pub const EV_FLAG1: ::uint16_t = 0x2000; +pub const EV_ERROR: ::uint16_t = 0x4000; +pub const EV_EOF: ::uint16_t = 0x8000; +pub const EV_SYSFLAGS: ::uint16_t = 0xf000; + +pub const NOTE_TRIGGER: ::uint32_t = 0x01000000; +pub const NOTE_FFNOP: ::uint32_t = 0x00000000; +pub const NOTE_FFAND: ::uint32_t = 0x40000000; +pub const NOTE_FFOR: ::uint32_t = 0x80000000; +pub const NOTE_FFCOPY: ::uint32_t = 0xc0000000; +pub const NOTE_FFCTRLMASK: ::uint32_t = 0xc0000000; +pub const NOTE_FFLAGSMASK: ::uint32_t = 0x00ffffff; +pub const NOTE_LOWAT: ::uint32_t = 0x00000001; +pub const NOTE_DELETE: ::uint32_t = 0x00000001; +pub const NOTE_WRITE: ::uint32_t = 0x00000002; +pub const NOTE_EXTEND: ::uint32_t = 0x00000004; +pub const NOTE_ATTRIB: ::uint32_t = 0x00000008; +pub const NOTE_LINK: ::uint32_t = 0x00000010; +pub const NOTE_RENAME: ::uint32_t = 0x00000020; +pub const NOTE_REVOKE: ::uint32_t = 0x00000040; +pub const NOTE_EXIT: ::uint32_t = 0x80000000; +pub const NOTE_FORK: ::uint32_t = 0x40000000; +pub const NOTE_EXEC: ::uint32_t = 0x20000000; +pub const NOTE_PDATAMASK: ::uint32_t = 0x000fffff; +pub const NOTE_PCTRLMASK: ::uint32_t = 0xf0000000; +pub const NOTE_TRACK: ::uint32_t = 0x00000001; +pub const NOTE_TRACKERR: ::uint32_t = 0x00000002; +pub const NOTE_CHILD: ::uint32_t = 0x00000004; +pub const NOTE_SECONDS: ::uint32_t = 0x00000001; +pub const NOTE_MSECONDS: ::uint32_t = 0x00000002; +pub const NOTE_USECONDS: ::uint32_t = 0x00000004; +pub const NOTE_NSECONDS: ::uint32_t = 0x00000008; + pub const MADV_PROTECT: ::c_int = 10; pub const RUSAGE_THREAD: ::c_int = 1; @@ -219,6 +272,8 @@ pub const CTL_P1003_1B_SIGQUEUE_MAX: ::c_int = 24; pub const CTL_P1003_1B_TIMER_MAX: ::c_int = 25; pub const CTL_P1003_1B_MAXID: ::c_int = 26; +pub const MSG_NOSIGNAL: ::c_int = 0x20000; + extern { pub fn __error() -> *mut ::c_int; diff --git a/src/liblibc/src/unix/bsd/freebsdlike/mod.rs b/src/liblibc/src/unix/bsd/freebsdlike/mod.rs index 89c375c667..2cfb323c06 100644 --- a/src/liblibc/src/unix/bsd/freebsdlike/mod.rs +++ b/src/liblibc/src/unix/bsd/freebsdlike/mod.rs @@ -17,15 +17,24 @@ pub type sem_t = _sem; pub enum timezone {} s! 
{ + pub struct utmpx { + pub ut_type: ::c_short, + pub ut_tv: ::timeval, + pub ut_id: [::c_char; 8], + pub ut_pid: ::pid_t, + pub ut_user: [::c_char; 32], + pub ut_line: [::c_char; 16], + pub ut_host: [::c_char; 128], + pub __ut_spare: [::c_char; 64], + } + pub struct glob_t { - pub gl_pathc: ::size_t, - __unused1: ::size_t, - pub gl_offs: ::size_t, - __unused2: ::c_int, + pub gl_pathc: ::size_t, + pub gl_matchc: ::size_t, + pub gl_offs: ::size_t, + pub gl_flags: ::c_int, pub gl_pathv: *mut *mut ::c_char, - __unused3: *mut ::c_void, - __unused4: *mut ::c_void, __unused5: *mut ::c_void, __unused6: *mut ::c_void, @@ -33,6 +42,15 @@ s! { __unused8: *mut ::c_void, } + pub struct kevent { + pub ident: ::uintptr_t, + pub filter: ::c_short, + pub flags: ::c_ushort, + pub fflags: ::c_uint, + pub data: ::intptr_t, + pub udata: *mut ::c_void, + } + pub struct sockaddr_storage { pub ss_len: u8, pub ss_family: ::sa_family_t, @@ -158,6 +176,16 @@ s! { } } +pub const EMPTY: ::c_short = 0; +pub const BOOT_TIME: ::c_short = 1; +pub const OLD_TIME: ::c_short = 2; +pub const NEW_TIME: ::c_short = 3; +pub const USER_PROCESS: ::c_short = 4; +pub const INIT_PROCESS: ::c_short = 5; +pub const LOGIN_PROCESS: ::c_short = 6; +pub const DEAD_PROCESS: ::c_short = 7; +pub const SHUTDOWN_TIME: ::c_short = 8; + pub const LC_COLLATE_MASK: ::c_int = (1 << 0); pub const LC_CTYPE_MASK: ::c_int = (1 << 1); pub const LC_MESSAGES_MASK: ::c_int = (1 << 2); @@ -439,6 +467,8 @@ pub const EMULTIHOP: ::c_int = 90; pub const ENOLINK: ::c_int = 91; pub const EPROTO: ::c_int = 92; +pub const EAI_SYSTEM: ::c_int = 11; + pub const F_DUPFD: ::c_int = 0; pub const F_GETFD: ::c_int = 1; pub const F_SETFD: ::c_int = 2; @@ -701,6 +731,18 @@ f! { } } +extern { + pub fn lutimes(file: *const ::c_char, times: *const ::timeval) -> ::c_int; + pub fn endutxent(); + pub fn getutxent() -> *mut utmpx; + pub fn getutxid(ut: *const utmpx) -> *mut utmpx; + pub fn getutxline(ut: *const utmpx) -> *mut utmpx; + pub fn pututxline(ut: *const utmpx) -> *mut utmpx; + pub fn setutxent(); + pub fn getutxuser(user: *const ::c_char) -> *mut utmpx; + pub fn setutxdb(_type: ::c_int, file: *const ::c_char) -> ::c_int; +} + #[link(name = "util")] extern { pub fn getnameinfo(sa: *const ::sockaddr, @@ -710,6 +752,12 @@ extern { serv: *mut ::c_char, servlen: ::size_t, flags: ::c_int) -> ::c_int; + pub fn kevent(kq: ::c_int, + changelist: *const ::kevent, + nchanges: ::c_int, + eventlist: *mut ::kevent, + nevents: ::c_int, + timeout: *const ::timespec) -> ::c_int; pub fn mincore(addr: *const ::c_void, len: ::size_t, vec: *mut ::c_char) -> ::c_int; pub fn sysctlnametomib(name: *const ::c_char, @@ -812,6 +860,10 @@ extern { pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, clock_id: clockid_t) -> ::c_int; pub fn sethostname(name: *const ::c_char, len: ::c_int) -> ::c_int; + pub fn sem_timedwait(sem: *mut sem_t, + abstime: *const ::timespec) -> ::c_int; + pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, + abstime: *const ::timespec) -> ::c_int; } cfg_if! 
{ diff --git a/src/liblibc/src/unix/bsd/mod.rs b/src/liblibc/src/unix/bsd/mod.rs index f36fa61cab..0616cd3518 100644 --- a/src/liblibc/src/unix/bsd/mod.rs +++ b/src/liblibc/src/unix/bsd/mod.rs @@ -179,33 +179,6 @@ pub const IPV6_V6ONLY: ::c_int = 27; pub const ST_RDONLY: ::c_ulong = 1; -pub const EV_ADD: ::uint16_t = 0x1; -pub const EV_CLEAR: ::uint16_t = 0x20; -pub const EV_DELETE: ::uint16_t = 0x2; -pub const EV_DISABLE: ::uint16_t = 0x8; -pub const EV_ENABLE: ::uint16_t = 0x4; -pub const EV_EOF: ::uint16_t = 0x8000; -pub const EV_ERROR: ::uint16_t = 0x4000; -pub const EV_FLAG1: ::uint16_t = 0x2000; -pub const EV_ONESHOT: ::uint16_t = 0x10; -pub const EV_SYSFLAGS: ::uint16_t = 0xf000; - -pub const NOTE_ATTRIB: ::uint32_t = 0x8; -pub const NOTE_CHILD: ::uint32_t = 0x4; -pub const NOTE_DELETE: ::uint32_t = 0x1; -pub const NOTE_EXEC: ::uint32_t = 0x20000000; -pub const NOTE_EXIT: ::uint32_t = 0x80000000; -pub const NOTE_EXTEND: ::uint32_t = 0x4; -pub const NOTE_FORK: ::uint32_t = 0x40000000; -pub const NOTE_LINK: ::uint32_t = 0x10; -pub const NOTE_LOWAT: ::uint32_t = 0x1; -pub const NOTE_PDATAMASK: ::uint32_t = 0x000fffff; -pub const NOTE_RENAME: ::uint32_t = 0x20; -pub const NOTE_REVOKE: ::uint32_t = 0x40; -pub const NOTE_TRACK: ::uint32_t = 0x1; -pub const NOTE_TRACKERR: ::uint32_t = 0x2; -pub const NOTE_WRITE: ::uint32_t = 0x2; - pub const NCCS: usize = 20; pub const O_ASYNC: ::c_int = 0x40; @@ -348,18 +321,29 @@ f! { } extern { + pub fn getifaddrs(ifap: *mut *mut ::ifaddrs) -> ::c_int; + pub fn freeifaddrs(ifa: *mut ::ifaddrs); pub fn setgroups(ngroups: ::c_int, ptr: *const ::gid_t) -> ::c_int; pub fn ioctl(fd: ::c_int, request: ::c_ulong, ...) -> ::c_int; pub fn kqueue() -> ::c_int; pub fn unmount(target: *const ::c_char, arg: ::c_int) -> ::c_int; pub fn syscall(num: ::c_int, ...) -> ::c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__getpwnam_r50")] + pub fn getpwnam_r(name: *const ::c_char, + pwd: *mut passwd, + buf: *mut ::c_char, + buflen: ::size_t, + result: *mut *mut passwd) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__getpwuid_r50")] pub fn getpwuid_r(uid: ::uid_t, pwd: *mut passwd, buf: *mut ::c_char, buflen: ::size_t, result: *mut *mut passwd) -> ::c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__getpwent50")] + pub fn getpwent() -> *mut passwd; + pub fn setpwent(); pub fn getprogname() -> *const ::c_char; pub fn setprogname(name: *const ::c_char); pub fn getloadavg(loadavg: *mut ::c_double, nelem: ::c_int) -> ::c_int; @@ -373,8 +357,8 @@ cfg_if! 
{ pub use self::apple::*; } else if #[cfg(any(target_os = "openbsd", target_os = "netbsd", target_os = "bitrig"))] { - mod openbsdlike; - pub use self::openbsdlike::*; + mod netbsdlike; + pub use self::netbsdlike::*; } else if #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] { mod freebsdlike; pub use self::freebsdlike::*; diff --git a/src/liblibc/src/unix/bsd/openbsdlike/mod.rs b/src/liblibc/src/unix/bsd/netbsdlike/mod.rs similarity index 96% rename from src/liblibc/src/unix/bsd/openbsdlike/mod.rs rename to src/liblibc/src/unix/bsd/netbsdlike/mod.rs index e81640f066..6604ec03b2 100644 --- a/src/liblibc/src/unix/bsd/openbsdlike/mod.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/mod.rs @@ -1,5 +1,3 @@ -pub type c_long = i64; -pub type c_ulong = u64; pub type time_t = i64; pub type mode_t = u32; pub type nlink_t = ::uint32_t; @@ -394,6 +392,8 @@ pub const SO_RCVLOWAT: ::c_int = 0x1004; pub const SO_ERROR: ::c_int = 0x1007; pub const SO_TYPE: ::c_int = 0x1008; +pub const MSG_NOSIGNAL: ::c_int = 0x400; + pub const IFF_LOOPBACK: ::c_int = 0x8; pub const SHUT_RD: ::c_int = 0; @@ -525,20 +525,22 @@ extern { flags: ::c_int) -> ::c_int; pub fn mkfifoat(dirfd: ::c_int, pathname: *const ::c_char, mode: ::mode_t) -> ::c_int; - pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, - clock_id: clockid_t) -> ::c_int; + pub fn sem_timedwait(sem: *mut sem_t, + abstime: *const ::timespec) -> ::c_int; + pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, + clock_id: clockid_t) -> ::c_int; + pub fn sethostname(name: *const ::c_char, len: ::size_t) -> ::c_int; + pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, + abstime: *const ::timespec) -> ::c_int; } cfg_if! { - if #[cfg(target_os = "bitrig")] { - mod bitrig; - pub use self::bitrig::*; - } else if #[cfg(target_os = "netbsd")] { + if #[cfg(target_os = "netbsd")] { mod netbsd; pub use self::netbsd::*; - } else if #[cfg(target_os = "openbsd")] { - mod openbsd; - pub use self::openbsd::*; + } else if #[cfg(any(target_os = "openbsd", target_os = "bitrig"))] { + mod openbsdlike; + pub use self::openbsdlike::*; } else { // Unknown target_os } diff --git a/src/liblibc/src/unix/bsd/openbsdlike/netbsd.rs b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/mod.rs similarity index 92% rename from src/liblibc/src/unix/bsd/openbsdlike/netbsd.rs rename to src/liblibc/src/unix/bsd/netbsdlike/netbsd/mod.rs index 5e5e5ae8a5..aa46aff191 100644 --- a/src/liblibc/src/unix/bsd/openbsdlike/netbsd.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/mod.rs @@ -16,9 +16,9 @@ s! 
{ pub struct glob_t { pub gl_pathc: ::size_t, - __unused1: ::c_int, + pub gl_matchc: ::size_t, pub gl_offs: ::size_t, - __unused2: ::c_int, + pub gl_flags: ::c_int, pub gl_pathv: *mut *mut ::c_char, __unused3: *mut ::c_void, @@ -355,16 +355,43 @@ pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 1; pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 2; pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_NORMAL; -pub const EVFILT_AIO: ::int16_t = 2; -pub const EVFILT_PROC: ::int16_t = 4; -pub const EVFILT_READ: ::int16_t = 0; -pub const EVFILT_SIGNAL: ::int16_t = 5; -pub const EVFILT_SYSCOUNT: ::int16_t = 7; -pub const EVFILT_TIMER: ::int16_t = 6; -pub const EVFILT_VNODE: ::int16_t = 3; -pub const EVFILT_WRITE: ::int16_t = 1; - +pub const EVFILT_AIO: ::int32_t = 2; +pub const EVFILT_PROC: ::int32_t = 4; +pub const EVFILT_READ: ::int32_t = 0; +pub const EVFILT_SIGNAL: ::int32_t = 5; +pub const EVFILT_TIMER: ::int32_t = 6; +pub const EVFILT_VNODE: ::int32_t = 3; +pub const EVFILT_WRITE: ::int32_t = 1; + +pub const EV_ADD: ::uint32_t = 0x1; +pub const EV_DELETE: ::uint32_t = 0x2; +pub const EV_ENABLE: ::uint32_t = 0x4; +pub const EV_DISABLE: ::uint32_t = 0x8; +pub const EV_ONESHOT: ::uint32_t = 0x10; +pub const EV_CLEAR: ::uint32_t = 0x20; +pub const EV_RECEIPT: ::uint32_t = 0x40; +pub const EV_DISPATCH: ::uint32_t = 0x80; +pub const EV_FLAG1: ::uint32_t = 0x2000; +pub const EV_ERROR: ::uint32_t = 0x4000; +pub const EV_EOF: ::uint32_t = 0x8000; +pub const EV_SYSFLAGS: ::uint32_t = 0xf000; + +pub const NOTE_LOWAT: ::uint32_t = 0x00000001; +pub const NOTE_DELETE: ::uint32_t = 0x00000001; +pub const NOTE_WRITE: ::uint32_t = 0x00000002; +pub const NOTE_EXTEND: ::uint32_t = 0x00000004; +pub const NOTE_ATTRIB: ::uint32_t = 0x00000008; +pub const NOTE_LINK: ::uint32_t = 0x00000010; +pub const NOTE_RENAME: ::uint32_t = 0x00000020; +pub const NOTE_REVOKE: ::uint32_t = 0x00000040; +pub const NOTE_EXIT: ::uint32_t = 0x80000000; +pub const NOTE_FORK: ::uint32_t = 0x40000000; +pub const NOTE_EXEC: ::uint32_t = 0x20000000; +pub const NOTE_PDATAMASK: ::uint32_t = 0x000fffff; pub const NOTE_PCTRLMASK: ::uint32_t = 0xf0000000; +pub const NOTE_TRACK: ::uint32_t = 0x00000001; +pub const NOTE_TRACKERR: ::uint32_t = 0x00000002; +pub const NOTE_CHILD: ::uint32_t = 0x00000004; pub const CRTSCTS: ::tcflag_t = 0x00010000; @@ -520,7 +547,10 @@ pub const KERN_PROC_RUID: ::c_int = 6; pub const KERN_PROC_GID: ::c_int = 7; pub const KERN_PROC_RGID: ::c_int = 8; +pub const EAI_SYSTEM: ::c_int = 11; + extern { + pub fn lutimes(file: *const ::c_char, times: *const ::timeval) -> ::c_int; pub fn getnameinfo(sa: *const ::sockaddr, salen: ::socklen_t, host: *mut ::c_char, @@ -560,7 +590,6 @@ extern { pid: ::pid_t, addr: *mut ::c_void, data: ::c_int) -> ::c_int; - pub fn sethostname(name: *const ::c_char, len: ::size_t) -> ::c_int; pub fn pthread_setname_np(t: ::pthread_t, name: *const ::c_char, arg: *mut ::c_void) -> ::c_int; @@ -584,3 +613,6 @@ extern { locale: *const ::c_char, base: ::locale_t) -> ::locale_t; } + +mod other; +pub use self::other::*; diff --git a/src/liblibc/src/unix/bsd/netbsdlike/netbsd/other/b32/mod.rs b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/other/b32/mod.rs new file mode 100644 index 0000000000..9b0b338b91 --- /dev/null +++ b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/other/b32/mod.rs @@ -0,0 +1,2 @@ +pub type c_long = i32; +pub type c_ulong = u32; diff --git a/src/liblibc/src/unix/bsd/netbsdlike/netbsd/other/b64/mod.rs b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/other/b64/mod.rs new file mode 100644 index 
0000000000..b07c476aa4 --- /dev/null +++ b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/other/b64/mod.rs @@ -0,0 +1,2 @@ +pub type c_long = i64; +pub type c_ulong = u64; diff --git a/src/liblibc/src/unix/bsd/netbsdlike/netbsd/other/mod.rs b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/other/mod.rs new file mode 100644 index 0000000000..f4e7cc4061 --- /dev/null +++ b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/other/mod.rs @@ -0,0 +1,13 @@ +cfg_if! { + if #[cfg(target_arch = "x86_64")] { + mod b64; + pub use self::b64::*; + } else if #[cfg(any(target_arch = "arm", + target_arch = "powerpc", + target_arch = "x86"))] { + mod b32; + pub use self::b32::*; + } else { + // Unknown target_arch + } +} diff --git a/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/bitrig.rs b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/bitrig.rs new file mode 100644 index 0000000000..695cf68dc5 --- /dev/null +++ b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/bitrig.rs @@ -0,0 +1,75 @@ +s! { + pub struct lconv { + pub decimal_point: *mut ::c_char, + pub thousands_sep: *mut ::c_char, + pub grouping: *mut ::c_char, + pub int_curr_symbol: *mut ::c_char, + pub currency_symbol: *mut ::c_char, + pub mon_decimal_point: *mut ::c_char, + pub mon_thousands_sep: *mut ::c_char, + pub mon_grouping: *mut ::c_char, + pub positive_sign: *mut ::c_char, + pub negative_sign: *mut ::c_char, + pub int_frac_digits: ::c_char, + pub frac_digits: ::c_char, + pub p_cs_precedes: ::c_char, + pub p_sep_by_space: ::c_char, + pub n_cs_precedes: ::c_char, + pub n_sep_by_space: ::c_char, + pub p_sign_posn: ::c_char, + pub n_sign_posn: ::c_char, + pub int_p_cs_precedes: ::c_char, + pub int_n_cs_precedes: ::c_char, + pub int_p_sep_by_space: ::c_char, + pub int_n_sep_by_space: ::c_char, + pub int_p_sign_posn: ::c_char, + pub int_n_sign_posn: ::c_char, + } +} + +pub const LC_COLLATE_MASK: ::c_int = (1 << 0); +pub const LC_CTYPE_MASK: ::c_int = (1 << 1); +pub const LC_MESSAGES_MASK: ::c_int = (1 << 2); +pub const LC_MONETARY_MASK: ::c_int = (1 << 3); +pub const LC_NUMERIC_MASK: ::c_int = (1 << 4); +pub const LC_TIME_MASK: ::c_int = (1 << 5); +pub const LC_ALL_MASK: ::c_int = LC_COLLATE_MASK + | LC_CTYPE_MASK + | LC_MESSAGES_MASK + | LC_MONETARY_MASK + | LC_NUMERIC_MASK + | LC_TIME_MASK; + +pub const ERA: ::nl_item = 52; +pub const ERA_D_FMT: ::nl_item = 53; +pub const ERA_D_T_FMT: ::nl_item = 54; +pub const ERA_T_FMT: ::nl_item = 55; +pub const ALT_DIGITS: ::nl_item = 56; + +pub const D_MD_ORDER: ::nl_item = 57; + +pub const ALTMON_1: ::nl_item = 58; +pub const ALTMON_2: ::nl_item = 59; +pub const ALTMON_3: ::nl_item = 60; +pub const ALTMON_4: ::nl_item = 61; +pub const ALTMON_5: ::nl_item = 62; +pub const ALTMON_6: ::nl_item = 63; +pub const ALTMON_7: ::nl_item = 64; +pub const ALTMON_8: ::nl_item = 65; +pub const ALTMON_9: ::nl_item = 66; +pub const ALTMON_10: ::nl_item = 67; +pub const ALTMON_11: ::nl_item = 68; +pub const ALTMON_12: ::nl_item = 69; + +pub const KERN_RND: ::c_int = 31; + +extern { + pub fn nl_langinfo_l(item: ::nl_item, locale: ::locale_t) -> *mut ::c_char; + pub fn duplocale(base: ::locale_t) -> ::locale_t; + pub fn freelocale(loc: ::locale_t) -> ::c_int; + pub fn newlocale(mask: ::c_int, + locale: *const ::c_char, + base: ::locale_t) -> ::locale_t; + pub fn uselocale(loc: ::locale_t) -> ::locale_t; + pub fn querylocale(mask: ::c_int, loc: ::locale_t) -> *const ::c_char; +} diff --git a/src/liblibc/src/unix/bsd/openbsdlike/openbsd.rs b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/mod.rs similarity index 81% rename from 
src/liblibc/src/unix/bsd/openbsdlike/openbsd.rs rename to src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/mod.rs index b608fa5070..d7afb11edd 100644 --- a/src/liblibc/src/unix/bsd/openbsdlike/openbsd.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/mod.rs @@ -1,3 +1,5 @@ +pub type c_long = i64; +pub type c_ulong = u64; pub type clock_t = i64; pub type suseconds_t = i64; pub type dev_t = i32; @@ -25,19 +27,26 @@ s! { pub struct glob_t { pub gl_pathc: ::c_int, - __unused1: ::c_int, + pub gl_matchc: ::c_int, pub gl_offs: ::c_int, - __unused2: ::c_int, + pub gl_flags: ::c_int, pub gl_pathv: *mut *mut ::c_char, - + __unused1: *mut ::c_void, + __unused2: *mut ::c_void, __unused3: *mut ::c_void, - __unused4: *mut ::c_void, __unused5: *mut ::c_void, __unused6: *mut ::c_void, __unused7: *mut ::c_void, - __unused8: *mut ::c_void, - __unused9: *mut ::c_void, + } + + pub struct kevent { + pub ident: ::uintptr_t, + pub filter: ::c_short, + pub flags: ::c_ushort, + pub fflags: ::c_uint, + pub data: ::int64_t, + pub udata: *mut ::c_void, } pub struct stat { @@ -111,34 +120,24 @@ s! { pub dli_saddr: *mut ::c_void, } - pub struct lconv { - pub decimal_point: *mut ::c_char, - pub thousands_sep: *mut ::c_char, - pub grouping: *mut ::c_char, - pub int_curr_symbol: *mut ::c_char, - pub currency_symbol: *mut ::c_char, - pub mon_decimal_point: *mut ::c_char, - pub mon_thousands_sep: *mut ::c_char, - pub mon_grouping: *mut ::c_char, - pub positive_sign: *mut ::c_char, - pub negative_sign: *mut ::c_char, - pub int_frac_digits: ::c_char, - pub frac_digits: ::c_char, - pub p_cs_precedes: ::c_char, - pub p_sep_by_space: ::c_char, - pub n_cs_precedes: ::c_char, - pub n_sep_by_space: ::c_char, - pub p_sign_posn: ::c_char, - pub n_sign_posn: ::c_char, - pub int_p_cs_precedes: ::c_char, - pub int_p_sep_by_space: ::c_char, - pub int_n_cs_precedes: ::c_char, - pub int_n_sep_by_space: ::c_char, - pub int_p_sign_posn: ::c_char, - pub int_n_sign_posn: ::c_char, + pub struct lastlog { + ll_time: ::time_t, + ll_line: [::c_char; UT_LINESIZE], + ll_host: [::c_char; UT_HOSTSIZE], + } + + pub struct utmp { + pub ut_line: [::c_char; UT_LINESIZE], + pub ut_name: [::c_char; UT_NAMESIZE], + pub ut_host: [::c_char; UT_HOSTSIZE], + pub ut_time: ::time_t, } } +pub const UT_NAMESIZE: usize = 32; +pub const UT_LINESIZE: usize = 8; +pub const UT_HOSTSIZE: usize = 256; + pub const O_CLOEXEC: ::c_int = 0x10000; pub const MS_SYNC : ::c_int = 0x0002; @@ -175,6 +174,8 @@ pub const EIPSEC : ::c_int = 82; pub const ENOMEDIUM : ::c_int = 85; pub const EMEDIUMTYPE : ::c_int = 86; +pub const EAI_SYSTEM: ::c_int = -11; + pub const RUSAGE_THREAD: ::c_int = 1; pub const MAP_COPY : ::c_int = 0x0002; @@ -247,6 +248,44 @@ pub const PTHREAD_MUTEX_NORMAL: ::c_int = 3; pub const PTHREAD_MUTEX_STRICT_NP: ::c_int = 4; pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_STRICT_NP; +pub const EVFILT_AIO: ::int16_t = -3; +pub const EVFILT_PROC: ::int16_t = -5; +pub const EVFILT_READ: ::int16_t = -1; +pub const EVFILT_SIGNAL: ::int16_t = -6; +pub const EVFILT_TIMER: ::int16_t = -7; +pub const EVFILT_VNODE: ::int16_t = -4; +pub const EVFILT_WRITE: ::int16_t = -2; + +pub const EV_ADD: ::uint16_t = 0x1; +pub const EV_DELETE: ::uint16_t = 0x2; +pub const EV_ENABLE: ::uint16_t = 0x4; +pub const EV_DISABLE: ::uint16_t = 0x8; +pub const EV_ONESHOT: ::uint16_t = 0x10; +pub const EV_CLEAR: ::uint16_t = 0x20; +pub const EV_FLAG1: ::uint16_t = 0x2000; +pub const EV_ERROR: ::uint16_t = 0x4000; +pub const EV_EOF: ::uint16_t = 0x8000; +pub const EV_SYSFLAGS: 
::uint16_t = 0xf000; + +pub const NOTE_LOWAT: ::uint32_t = 0x00000001; +pub const NOTE_EOF: ::uint32_t = 0x00000002; +pub const NOTE_DELETE: ::uint32_t = 0x00000001; +pub const NOTE_WRITE: ::uint32_t = 0x00000002; +pub const NOTE_EXTEND: ::uint32_t = 0x00000004; +pub const NOTE_ATTRIB: ::uint32_t = 0x00000008; +pub const NOTE_LINK: ::uint32_t = 0x00000010; +pub const NOTE_RENAME: ::uint32_t = 0x00000020; +pub const NOTE_REVOKE: ::uint32_t = 0x00000040; +pub const NOTE_TRUNCATE: ::uint32_t = 0x00000080; +pub const NOTE_EXIT: ::uint32_t = 0x80000000; +pub const NOTE_FORK: ::uint32_t = 0x40000000; +pub const NOTE_EXEC: ::uint32_t = 0x20000000; +pub const NOTE_PDATAMASK: ::uint32_t = 0x000fffff; +pub const NOTE_PCTRLMASK: ::uint32_t = 0xf0000000; +pub const NOTE_TRACK: ::uint32_t = 0x00000001; +pub const NOTE_TRACKERR: ::uint32_t = 0x00000002; +pub const NOTE_CHILD: ::uint32_t = 0x00000004; + pub const TMP_MAX : ::c_uint = 0x7fffffff; pub const NI_MAXHOST: ::size_t = 256; @@ -296,7 +335,6 @@ pub const KERN_OSVERSION: ::c_int = 27; pub const KERN_SOMAXCONN: ::c_int = 28; pub const KERN_SOMINCONN: ::c_int = 29; pub const KERN_USERMOUNT: ::c_int = 30; -pub const KERN_RND: ::c_int = 31; pub const KERN_NOSUIDCOREDUMP: ::c_int = 32; pub const KERN_FSYNC: ::c_int = 33; pub const KERN_SYSVMSG: ::c_int = 34; @@ -375,6 +413,12 @@ extern { serv: *mut ::c_char, servlen: ::size_t, flags: ::c_int) -> ::c_int; + pub fn kevent(kq: ::c_int, + changelist: *const ::kevent, + nchanges: ::c_int, + eventlist: *mut ::kevent, + nevents: ::c_int, + timeout: *const ::timespec) -> ::c_int; pub fn mprotect(addr: *mut ::c_void, len: ::size_t, prot: ::c_int) -> ::c_int; pub fn pthread_main_np() -> ::c_int; @@ -390,3 +434,15 @@ extern { -> ::c_int; pub fn getentropy(buf: *mut ::c_void, buflen: ::size_t) -> ::c_int; } + +cfg_if! { + if #[cfg(target_os = "openbsd")] { + mod openbsd; + pub use self::openbsd::*; + } else if #[cfg(target_os = "bitrig")] { + mod bitrig; + pub use self::bitrig::*; + } else { + // Unknown target_os + } +} diff --git a/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/openbsd.rs b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/openbsd.rs new file mode 100644 index 0000000000..b7fed64ede --- /dev/null +++ b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/openbsd.rs @@ -0,0 +1,28 @@ +s! 
{ + pub struct lconv { + pub decimal_point: *mut ::c_char, + pub thousands_sep: *mut ::c_char, + pub grouping: *mut ::c_char, + pub int_curr_symbol: *mut ::c_char, + pub currency_symbol: *mut ::c_char, + pub mon_decimal_point: *mut ::c_char, + pub mon_thousands_sep: *mut ::c_char, + pub mon_grouping: *mut ::c_char, + pub positive_sign: *mut ::c_char, + pub negative_sign: *mut ::c_char, + pub int_frac_digits: ::c_char, + pub frac_digits: ::c_char, + pub p_cs_precedes: ::c_char, + pub p_sep_by_space: ::c_char, + pub n_cs_precedes: ::c_char, + pub n_sep_by_space: ::c_char, + pub p_sign_posn: ::c_char, + pub n_sign_posn: ::c_char, + pub int_p_cs_precedes: ::c_char, + pub int_p_sep_by_space: ::c_char, + pub int_n_cs_precedes: ::c_char, + pub int_n_sep_by_space: ::c_char, + pub int_p_sign_posn: ::c_char, + pub int_n_sign_posn: ::c_char, + } +} diff --git a/src/liblibc/src/unix/bsd/openbsdlike/bitrig.rs b/src/liblibc/src/unix/bsd/openbsdlike/bitrig.rs deleted file mode 100644 index cb1eae8a24..0000000000 --- a/src/liblibc/src/unix/bsd/openbsdlike/bitrig.rs +++ /dev/null @@ -1,435 +0,0 @@ -pub type clock_t = i64; -pub type suseconds_t = i64; -pub type dev_t = i32; -pub type sigset_t = ::c_uint; -pub type blksize_t = ::uint32_t; -pub type fsblkcnt_t = ::c_uint; -pub type fsfilcnt_t = ::c_uint; -pub type pthread_attr_t = *mut ::c_void; -pub type pthread_mutex_t = *mut ::c_void; -pub type pthread_mutexattr_t = *mut ::c_void; -pub type pthread_cond_t = *mut ::c_void; -pub type pthread_condattr_t = *mut ::c_void; -pub type pthread_rwlock_t = *mut ::c_void; - -s! { - pub struct dirent { - pub d_fileno: ::ino_t, - pub d_off: ::off_t, - pub d_reclen: u16, - pub d_type: u8, - pub d_namlen: u8, - __d_padding: [u8; 4], - pub d_name: [::c_char; 256], - } - - pub struct glob_t { - pub gl_pathc: ::c_int, - pub gl_matchc: ::c_int, - pub gl_offs: ::c_int, - pub gl_flags: ::c_int, - pub gl_pathv: *mut *mut ::c_char, - __unused1: *mut ::c_void, - __unused2: *mut ::c_void, - __unused3: *mut ::c_void, - __unused4: *mut ::c_void, - __unused5: *mut ::c_void, - __unused6: *mut ::c_void, - __unused7: *mut ::c_void, - } - - pub struct stat { - pub st_mode: ::mode_t, - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_size: ::off_t, - pub st_blocks: ::blkcnt_t, - pub st_blksize: ::blksize_t, - pub st_flags: ::uint32_t, - pub st_gen: ::uint32_t, - pub st_birthtime: ::time_t, - pub st_birthtime_nsec: ::c_long, - } - - pub struct statvfs { - pub f_bsize: ::c_ulong, - pub f_frsize: ::c_ulong, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_favail: ::fsfilcnt_t, - pub f_fsid: ::c_ulong, - pub f_flag: ::c_ulong, - pub f_namemax: ::c_ulong, - } - - pub struct addrinfo { - pub ai_flags: ::c_int, - pub ai_family: ::c_int, - pub ai_socktype: ::c_int, - pub ai_protocol: ::c_int, - pub ai_addrlen: ::socklen_t, - pub ai_addr: *mut ::sockaddr, - pub ai_canonname: *mut ::c_char, - pub ai_next: *mut ::addrinfo, - } - - pub struct sockaddr_storage { - pub ss_len: u8, - pub ss_family: ::sa_family_t, - __ss_pad1: [u8; 6], - __ss_pad2: i64, - __ss_pad3: [u8; 240], - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_code: ::c_int, - pub si_errno: ::c_int, - 
pub si_addr: *mut ::c_void - } - - pub struct Dl_info { - pub dli_fname: *const ::c_char, - pub dli_fbase: *mut ::c_void, - pub dli_sname: *const ::c_char, - pub dli_saddr: *mut ::c_void, - } - - pub struct lconv { - pub decimal_point: *mut ::c_char, - pub thousands_sep: *mut ::c_char, - pub grouping: *mut ::c_char, - pub int_curr_symbol: *mut ::c_char, - pub currency_symbol: *mut ::c_char, - pub mon_decimal_point: *mut ::c_char, - pub mon_thousands_sep: *mut ::c_char, - pub mon_grouping: *mut ::c_char, - pub positive_sign: *mut ::c_char, - pub negative_sign: *mut ::c_char, - pub int_frac_digits: ::c_char, - pub frac_digits: ::c_char, - pub p_cs_precedes: ::c_char, - pub p_sep_by_space: ::c_char, - pub n_cs_precedes: ::c_char, - pub n_sep_by_space: ::c_char, - pub p_sign_posn: ::c_char, - pub n_sign_posn: ::c_char, - pub int_p_cs_precedes: ::c_char, - pub int_n_cs_precedes: ::c_char, - pub int_p_sep_by_space: ::c_char, - pub int_n_sep_by_space: ::c_char, - pub int_p_sign_posn: ::c_char, - pub int_n_sign_posn: ::c_char, - } -} - -pub const LC_COLLATE_MASK: ::c_int = (1 << 0); -pub const LC_CTYPE_MASK: ::c_int = (1 << 1); -pub const LC_MESSAGES_MASK: ::c_int = (1 << 2); -pub const LC_MONETARY_MASK: ::c_int = (1 << 3); -pub const LC_NUMERIC_MASK: ::c_int = (1 << 4); -pub const LC_TIME_MASK: ::c_int = (1 << 5); -pub const LC_ALL_MASK: ::c_int = LC_COLLATE_MASK - | LC_CTYPE_MASK - | LC_MESSAGES_MASK - | LC_MONETARY_MASK - | LC_NUMERIC_MASK - | LC_TIME_MASK; - -pub const ERA: ::nl_item = 52; -pub const ERA_D_FMT: ::nl_item = 53; -pub const ERA_D_T_FMT: ::nl_item = 54; -pub const ERA_T_FMT: ::nl_item = 55; -pub const ALT_DIGITS: ::nl_item = 56; - -pub const D_MD_ORDER: ::nl_item = 57; - -pub const ALTMON_1: ::nl_item = 58; -pub const ALTMON_2: ::nl_item = 59; -pub const ALTMON_3: ::nl_item = 60; -pub const ALTMON_4: ::nl_item = 61; -pub const ALTMON_5: ::nl_item = 62; -pub const ALTMON_6: ::nl_item = 63; -pub const ALTMON_7: ::nl_item = 64; -pub const ALTMON_8: ::nl_item = 65; -pub const ALTMON_9: ::nl_item = 66; -pub const ALTMON_10: ::nl_item = 67; -pub const ALTMON_11: ::nl_item = 68; -pub const ALTMON_12: ::nl_item = 69; - -pub const O_CLOEXEC: ::c_int = 0x10000; - -pub const MS_SYNC : ::c_int = 0x0002; -pub const MS_INVALIDATE : ::c_int = 0x0004; - -pub const PTHREAD_STACK_MIN : ::size_t = 2048; - -pub const ENOATTR : ::c_int = 83; -pub const EILSEQ : ::c_int = 84; -pub const EOVERFLOW : ::c_int = 87; -pub const ECANCELED : ::c_int = 88; -pub const EIDRM : ::c_int = 89; -pub const ENOMSG : ::c_int = 90; -pub const ENOTSUP : ::c_int = 91; -pub const ELAST : ::c_int = 91; - -pub const F_DUPFD_CLOEXEC : ::c_int = 10; - -pub const RLIM_NLIMITS: ::c_int = 9; - -pub const SO_SNDTIMEO: ::c_int = 0x1005; -pub const SO_RCVTIMEO: ::c_int = 0x1006; - -pub const O_DSYNC : ::c_int = 128; - -pub const MAP_RENAME : ::c_int = 0x0000; -pub const MAP_NORESERVE : ::c_int = 0x0000; -pub const MAP_HASSEMAPHORE : ::c_int = 0x0000; - -pub const EIPSEC : ::c_int = 82; -pub const ENOMEDIUM : ::c_int = 85; -pub const EMEDIUMTYPE : ::c_int = 86; - -pub const RUSAGE_THREAD: ::c_int = 1; - -pub const IPV6_ADD_MEMBERSHIP: ::c_int = 12; -pub const IPV6_DROP_MEMBERSHIP: ::c_int = 13; - -pub const MAP_COPY : ::c_int = 0x0002; -pub const MAP_NOEXTEND : ::c_int = 0x0000; - -pub const _SC_IOV_MAX : ::c_int = 51; -pub const _SC_GETGR_R_SIZE_MAX : ::c_int = 100; -pub const _SC_GETPW_R_SIZE_MAX : ::c_int = 101; -pub const _SC_LOGIN_NAME_MAX : ::c_int = 102; -pub const _SC_MQ_PRIO_MAX : ::c_int = 59; -pub const 
_SC_NPROCESSORS_ONLN : ::c_int = 503; -pub const _SC_THREADS : ::c_int = 91; -pub const _SC_THREAD_ATTR_STACKADDR : ::c_int = 77; -pub const _SC_THREAD_ATTR_STACKSIZE : ::c_int = 78; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS : ::c_int = 80; -pub const _SC_THREAD_KEYS_MAX : ::c_int = 81; -pub const _SC_THREAD_PRIO_INHERIT : ::c_int = 82; -pub const _SC_THREAD_PRIO_PROTECT : ::c_int = 83; -pub const _SC_THREAD_PRIORITY_SCHEDULING : ::c_int = 84; -pub const _SC_THREAD_PROCESS_SHARED : ::c_int = 85; -pub const _SC_THREAD_SAFE_FUNCTIONS : ::c_int = 103; -pub const _SC_THREAD_STACK_MIN : ::c_int = 89; -pub const _SC_THREAD_THREADS_MAX : ::c_int = 90; -pub const _SC_TTY_NAME_MAX : ::c_int = 107; -pub const _SC_ATEXIT_MAX : ::c_int = 46; -pub const _SC_CLK_TCK : ::c_int = 3; -pub const _SC_AIO_LISTIO_MAX : ::c_int = 42; -pub const _SC_AIO_MAX : ::c_int = 43; -pub const _SC_ASYNCHRONOUS_IO : ::c_int = 45; -pub const _SC_MAPPED_FILES : ::c_int = 53; -pub const _SC_MEMLOCK : ::c_int = 54; -pub const _SC_MEMLOCK_RANGE : ::c_int = 55; -pub const _SC_MEMORY_PROTECTION : ::c_int = 56; -pub const _SC_MESSAGE_PASSING : ::c_int = 57; -pub const _SC_MQ_OPEN_MAX : ::c_int = 58; -pub const _SC_PRIORITY_SCHEDULING : ::c_int = 61; -pub const _SC_SEMAPHORES : ::c_int = 67; -pub const _SC_SHARED_MEMORY_OBJECTS : ::c_int = 68; -pub const _SC_SYNCHRONIZED_IO : ::c_int = 75; -pub const _SC_TIMERS : ::c_int = 94; -pub const _SC_XOPEN_CRYPT : ::c_int = 117; -pub const _SC_XOPEN_ENH_I18N : ::c_int = 118; -pub const _SC_XOPEN_LEGACY : ::c_int = 119; -pub const _SC_XOPEN_REALTIME : ::c_int = 120; -pub const _SC_XOPEN_REALTIME_THREADS : ::c_int = 121; -pub const _SC_XOPEN_UNIX : ::c_int = 123; -pub const _SC_XOPEN_VERSION : ::c_int = 125; -pub const _SC_SEM_NSEMS_MAX : ::c_int = 31; -pub const _SC_SEM_VALUE_MAX : ::c_int = 32; -pub const _SC_AIO_PRIO_DELTA_MAX : ::c_int = 44; -pub const _SC_DELAYTIMER_MAX : ::c_int = 50; -pub const _SC_PRIORITIZED_IO : ::c_int = 60; -pub const _SC_REALTIME_SIGNALS : ::c_int = 64; -pub const _SC_RTSIG_MAX : ::c_int = 66; -pub const _SC_SIGQUEUE_MAX : ::c_int = 70; -pub const _SC_TIMER_MAX : ::c_int = 93; -pub const _SC_HOST_NAME_MAX: ::c_int = 33; - -pub const FD_SETSIZE: usize = 1024; - -pub const ST_NOSUID: ::c_ulong = 2; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = 0 as *mut _; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = 0 as *mut _; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = 0 as *mut _; - -pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 1; -pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 2; -pub const PTHREAD_MUTEX_NORMAL: ::c_int = 3; -pub const PTHREAD_MUTEX_STRICT_NP: ::c_int = 4; -pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_STRICT_NP; - -pub const TMP_MAX : ::c_uint = 0x7fffffff; - -pub const NI_MAXHOST: ::size_t = 256; - -pub const CTL_MAXNAME: ::c_int = 12; -pub const CTLTYPE_NODE: ::c_int = 1; -pub const CTLTYPE_INT: ::c_int = 2; -pub const CTLTYPE_STRING: ::c_int = 3; -pub const CTLTYPE_QUAD: ::c_int = 4; -pub const CTLTYPE_STRUCT: ::c_int = 5; -pub const CTL_UNSPEC: ::c_int = 0; -pub const CTL_KERN: ::c_int = 1; -pub const CTL_VM: ::c_int = 2; -pub const CTL_FS: ::c_int = 3; -pub const CTL_NET: ::c_int = 4; -pub const CTL_DEBUG: ::c_int = 5; -pub const CTL_HW: ::c_int = 6; -pub const CTL_MACHDEP: ::c_int = 7; -pub const CTL_DDB: ::c_int = 9; -pub const CTL_VFS: ::c_int = 10; -pub const CTL_MAXID: ::c_int = 11; -pub const KERN_OSTYPE: ::c_int = 1; -pub const KERN_OSRELEASE: ::c_int = 2; -pub const KERN_OSREV: ::c_int = 3; -pub 
const KERN_VERSION: ::c_int = 4; -pub const KERN_MAXVNODES: ::c_int = 5; -pub const KERN_MAXPROC: ::c_int = 6; -pub const KERN_MAXFILES: ::c_int = 7; -pub const KERN_ARGMAX: ::c_int = 8; -pub const KERN_SECURELVL: ::c_int = 9; -pub const KERN_HOSTNAME: ::c_int = 10; -pub const KERN_HOSTID: ::c_int = 11; -pub const KERN_CLOCKRATE: ::c_int = 12; -pub const KERN_PROF: ::c_int = 16; -pub const KERN_POSIX1: ::c_int = 17; -pub const KERN_NGROUPS: ::c_int = 18; -pub const KERN_JOB_CONTROL: ::c_int = 19; -pub const KERN_SAVED_IDS: ::c_int = 20; -pub const KERN_BOOTTIME: ::c_int = 21; -pub const KERN_DOMAINNAME: ::c_int = 22; -pub const KERN_MAXPARTITIONS: ::c_int = 23; -pub const KERN_RAWPARTITION: ::c_int = 24; -pub const KERN_MAXTHREAD: ::c_int = 25; -pub const KERN_NTHREADS: ::c_int = 26; -pub const KERN_OSVERSION: ::c_int = 27; -pub const KERN_SOMAXCONN: ::c_int = 28; -pub const KERN_SOMINCONN: ::c_int = 29; -pub const KERN_USERMOUNT: ::c_int = 30; -pub const KERN_RND: ::c_int = 31; -pub const KERN_NOSUIDCOREDUMP: ::c_int = 32; -pub const KERN_FSYNC: ::c_int = 33; -pub const KERN_SYSVMSG: ::c_int = 34; -pub const KERN_SYSVSEM: ::c_int = 35; -pub const KERN_SYSVSHM: ::c_int = 36; -pub const KERN_ARND: ::c_int = 37; -pub const KERN_MSGBUFSIZE: ::c_int = 38; -pub const KERN_MALLOCSTATS: ::c_int = 39; -pub const KERN_CPTIME: ::c_int = 40; -pub const KERN_NCHSTATS: ::c_int = 41; -pub const KERN_FORKSTAT: ::c_int = 42; -pub const KERN_NSELCOLL: ::c_int = 43; -pub const KERN_TTY: ::c_int = 44; -pub const KERN_CCPU: ::c_int = 45; -pub const KERN_FSCALE: ::c_int = 46; -pub const KERN_NPROCS: ::c_int = 47; -pub const KERN_MSGBUF: ::c_int = 48; -pub const KERN_POOL: ::c_int = 49; -pub const KERN_STACKGAPRANDOM: ::c_int = 50; -pub const KERN_SYSVIPC_INFO: ::c_int = 51; -pub const KERN_SPLASSERT: ::c_int = 54; -pub const KERN_PROC_ARGS: ::c_int = 55; -pub const KERN_NFILES: ::c_int = 56; -pub const KERN_TTYCOUNT: ::c_int = 57; -pub const KERN_NUMVNODES: ::c_int = 58; -pub const KERN_MBSTAT: ::c_int = 59; -pub const KERN_SEMINFO: ::c_int = 61; -pub const KERN_SHMINFO: ::c_int = 62; -pub const KERN_INTRCNT: ::c_int = 63; -pub const KERN_WATCHDOG: ::c_int = 64; -pub const KERN_PROC: ::c_int = 66; -pub const KERN_MAXCLUSTERS: ::c_int = 67; -pub const KERN_EVCOUNT: ::c_int = 68; -pub const KERN_TIMECOUNTER: ::c_int = 69; -pub const KERN_MAXLOCKSPERUID: ::c_int = 70; -pub const KERN_CPTIME2: ::c_int = 71; -pub const KERN_CACHEPCT: ::c_int = 72; -pub const KERN_FILE: ::c_int = 73; -pub const KERN_CONSDEV: ::c_int = 75; -pub const KERN_NETLIVELOCKS: ::c_int = 76; -pub const KERN_POOL_DEBUG: ::c_int = 77; -pub const KERN_PROC_CWD: ::c_int = 78; -pub const KERN_PROC_NOBROADCASTKILL: ::c_int = 79; -pub const KERN_PROC_VMMAP: ::c_int = 80; -pub const KERN_GLOBAL_PTRACE: ::c_int = 81; -pub const KERN_CONSBUFSIZE: ::c_int = 82; -pub const KERN_CONSBUF: ::c_int = 83; -pub const KERN_MAXID: ::c_int = 84; -pub const KERN_PROC_ALL: ::c_int = 0; -pub const KERN_PROC_PID: ::c_int = 1; -pub const KERN_PROC_PGRP: ::c_int = 2; -pub const KERN_PROC_SESSION: ::c_int = 3; -pub const KERN_PROC_TTY: ::c_int = 4; -pub const KERN_PROC_UID: ::c_int = 5; -pub const KERN_PROC_RUID: ::c_int = 6; -pub const KERN_PROC_KTHREAD: ::c_int = 7; -pub const KERN_PROC_SHOW_THREADS: ::c_int = 0x40000000; -pub const KERN_SYSVIPC_MSG_INFO: ::c_int = 1; -pub const KERN_SYSVIPC_SEM_INFO: ::c_int = 2; -pub const KERN_SYSVIPC_SHM_INFO: ::c_int = 3; -pub const KERN_PROC_ARGV: ::c_int = 1; -pub const KERN_PROC_NARGV: ::c_int = 2; -pub const KERN_PROC_ENV: 
::c_int = 3; -pub const KERN_PROC_NENV: ::c_int = 4; -pub const KI_NGROUPS: ::c_int = 16; -pub const KI_MAXCOMLEN: ::c_int = 24; -pub const KI_WMESGLEN: ::c_int = 8; -pub const KI_MAXLOGNAME: ::c_int = 32; -pub const KI_EMULNAMELEN: ::c_int = 8; - -extern { - pub fn getnameinfo(sa: *const ::sockaddr, - salen: ::socklen_t, - host: *mut ::c_char, - hostlen: ::size_t, - serv: *mut ::c_char, - servlen: ::size_t, - flags: ::c_int) -> ::c_int; - pub fn mprotect(addr: *const ::c_void, len: ::size_t, prot: ::c_int) - -> ::c_int; - pub fn pthread_main_np() -> ::c_int; - pub fn pthread_set_name_np(tid: ::pthread_t, name: *const ::c_char); - pub fn pthread_stackseg_np(thread: ::pthread_t, - sinfo: *mut ::stack_t) -> ::c_int; - pub fn sysctl(name: *mut ::c_int, - namelen: ::c_uint, - oldp: *mut ::c_void, - oldlenp: *mut ::size_t, - newp: *mut ::c_void, - newlen: ::size_t) - -> ::c_int; - pub fn sysctlbyname(name: *const ::c_char, - oldp: *mut ::c_void, - oldlenp: *mut ::size_t, - newp: *mut ::c_void, - newlen: ::size_t) - -> ::c_int; - pub fn nl_langinfo_l(item: ::nl_item, locale: ::locale_t) -> *mut ::c_char; - pub fn duplocale(base: ::locale_t) -> ::locale_t; - pub fn freelocale(loc: ::locale_t) -> ::c_int; - pub fn newlocale(mask: ::c_int, - locale: *const ::c_char, - base: ::locale_t) -> ::locale_t; - pub fn uselocale(loc: ::locale_t) -> ::locale_t; - pub fn querylocale(mask: ::c_int, loc: ::locale_t) -> *const ::c_char; -} diff --git a/src/liblibc/src/unix/haiku/b32.rs b/src/liblibc/src/unix/haiku/b32.rs new file mode 100644 index 0000000000..9b0b338b91 --- /dev/null +++ b/src/liblibc/src/unix/haiku/b32.rs @@ -0,0 +1,2 @@ +pub type c_long = i32; +pub type c_ulong = u32; diff --git a/src/liblibc/src/unix/haiku/b64.rs b/src/liblibc/src/unix/haiku/b64.rs new file mode 100644 index 0000000000..5d63ce9ce4 --- /dev/null +++ b/src/liblibc/src/unix/haiku/b64.rs @@ -0,0 +1,2 @@ +pub type c_ulong = u64; +pub type c_long = i64; diff --git a/src/liblibc/src/unix/haiku/mod.rs b/src/liblibc/src/unix/haiku/mod.rs new file mode 100644 index 0000000000..a241a02054 --- /dev/null +++ b/src/liblibc/src/unix/haiku/mod.rs @@ -0,0 +1,748 @@ +use dox::mem; + +pub type rlim_t = ::uintptr_t; +pub type sa_family_t = u8; +pub type pthread_key_t = ::c_int; +pub type nfds_t = ::c_long; +pub type tcflag_t = ::c_uint; +pub type speed_t = ::c_uint; +pub type c_char = i8; +pub type clock_t = i32; +pub type clockid_t = i32; +pub type time_t = i32; +pub type suseconds_t = i32; +pub type wchar_t = i32; +pub type off_t = i64; +pub type ino_t = i64; +pub type blkcnt_t = i64; +pub type blksize_t = i32; +pub type dev_t = i32; +pub type mode_t = u32; +pub type nlink_t = i32; +pub type useconds_t = u32; +pub type socklen_t = u32; +pub type pthread_t = ::uintptr_t; +pub type pthread_mutexattr_t = ::uintptr_t; +pub type sigset_t = u64; +pub type fsblkcnt_t = i64; +pub type fsfilcnt_t = i64; +pub type pthread_attr_t = *mut ::c_void; +pub type nl_item = ::c_int; + +pub enum timezone {} + +s! 
{ + pub struct sockaddr { + pub sa_len: u8, + pub sa_family: sa_family_t, + pub sa_data: [::c_char; 30], + } + + pub struct sockaddr_in { + pub sin_len: u8, + pub sin_family: sa_family_t, + pub sin_port: ::in_port_t, + pub sin_addr: ::in_addr, + pub sin_zero: [u8; 24], + } + + pub struct sockaddr_in6 { + pub sin6_len: u8, + pub sin6_family: sa_family_t, + pub sin6_port: ::in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: ::in6_addr, + pub sin6_scope_id: u32, + } + + pub struct sockaddr_un { + pub sun_len: u8, + pub sun_family: sa_family_t, + pub sun_path: [::c_char; 126] + } + + pub struct sockaddr_storage { + pub ss_len: u8, + pub ss_family: sa_family_t, + __ss_pad1: [u8; 6], + __ss_pad2: u64, + __ss_pad3: [u8; 112], + } + + pub struct addrinfo { + pub ai_flags: ::c_int, + pub ai_family: ::c_int, + pub ai_socktype: ::c_int, + pub ai_protocol: ::c_int, + pub ai_addrlen: socklen_t, + pub ai_canonname: *mut c_char, + pub ai_addr: *mut ::sockaddr, + pub ai_next: *mut addrinfo, + } + + pub struct fd_set { + fds_bits: [c_ulong; FD_SETSIZE / ULONG_SIZE], + } + + pub struct tm { + pub tm_sec: ::c_int, + pub tm_min: ::c_int, + pub tm_hour: ::c_int, + pub tm_mday: ::c_int, + pub tm_mon: ::c_int, + pub tm_year: ::c_int, + pub tm_wday: ::c_int, + pub tm_yday: ::c_int, + pub tm_isdst: ::c_int, + pub tm_gmtoff: ::c_long, + pub tm_zone: *const ::c_char, + } + + pub struct utsname { + pub sysname: [::c_char; 32], + pub nodename: [::c_char; 32], + pub release: [::c_char; 32], + pub version: [::c_char; 32], + pub machine: [::c_char; 32], + } + + pub struct lconv { + pub decimal_point: *mut ::c_char, + pub thousands_sep: *mut ::c_char, + pub grouping: *mut ::c_char, + pub int_curr_symbol: *mut ::c_char, + pub currency_symbol: *mut ::c_char, + pub mon_decimal_point: *mut ::c_char, + pub mon_thousands_sep: *mut ::c_char, + pub mon_grouping: *mut ::c_char, + pub positive_sign: *mut ::c_char, + pub negative_sign: *mut ::c_char, + pub int_frac_digits: ::c_char, + pub frac_digits: ::c_char, + pub p_cs_precedes: ::c_char, + pub p_sep_by_space: ::c_char, + pub n_cs_precedes: ::c_char, + pub n_sep_by_space: ::c_char, + pub p_sign_posn: ::c_char, + pub n_sign_posn: ::c_char, + pub int_p_cs_precedes: ::c_char, + pub int_p_sep_by_space: ::c_char, + pub int_n_cs_precedes: ::c_char, + pub int_n_sep_by_space: ::c_char, + pub int_p_sign_posn: ::c_char, + pub int_n_sign_posn: ::c_char, + } + + pub struct msghdr { + pub msg_name: *mut ::c_void, + pub msg_namelen: ::socklen_t, + pub msg_iov: *mut ::iovec, + pub msg_iovlen: ::c_int, + pub msg_control: *mut ::c_void, + pub msg_controllen: ::socklen_t, + pub msg_flags: ::c_int, + } + + pub struct Dl_info { + pub dli_fname: *const ::c_char, + pub dli_fbase: *mut ::c_void, + pub dli_sname: *const ::c_char, + pub dli_saddr: *mut ::c_void, + } + + pub struct termios { + pub c_iflag: ::tcflag_t, + pub c_oflag: ::tcflag_t, + pub c_cflag: ::tcflag_t, + pub c_lflag: ::tcflag_t, + pub c_line: ::c_char, + pub c_ispeed: ::speed_t, + pub c_ospeed: ::speed_t, + pub c_cc: [::cc_t; ::NCCS], + } + + pub struct stat { + pub st_dev: dev_t, + pub st_ino: ino_t, + pub st_mode: mode_t, + pub st_nlink: nlink_t, + pub st_uid: ::uid_t, + pub st_gid: ::gid_t, + pub st_size: off_t, + pub st_rdev: dev_t, + pub st_blksize: blksize_t, + pub st_atime: time_t, + pub st_atime_nsec: c_long, + pub st_mtime: time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: time_t, + pub st_ctime_nsec: c_long, + pub st_crtime: time_t, + pub st_crtime_nsec: c_long, + pub st_type: u32, + pub st_blocks: blkcnt_t, + } + + pub 
struct dirent { + pub d_dev: dev_t, + pub d_pdev: dev_t, + pub d_ino: ino_t, + pub d_pino: i64, + pub d_reclen: ::c_ushort, + pub d_name: [::c_char; 1024], // Max length is _POSIX_PATH_MAX + } + + pub struct glob_t { + pub gl_pathc: ::size_t, + __unused1: ::size_t, + pub gl_offs: ::size_t, + __unused2: ::size_t, + pub gl_pathv: *mut *mut c_char, + + __unused3: *mut ::c_void, + __unused4: *mut ::c_void, + __unused5: *mut ::c_void, + __unused6: *mut ::c_void, + __unused7: *mut ::c_void, + __unused8: *mut ::c_void, + } + + pub struct pthread_mutex_t { + flags: u32, + lock: i32, + unused: i32, + owner: i32, + owner_count: i32, + } + + pub struct pthread_cond_t { + flags: u32, + unused: i32, + mutex: *mut ::c_void, + waiter_count: i32, + lock: i32, + } + + pub struct pthread_rwlock_t { + flags: u32, + owner: i32, + lock_sem: i32, // this is actually a union + lock_count: i32, + reader_count: i32, + writer_count: i32, + waiters: [*mut ::c_void; 2], + } + + pub struct passwd { + pub pw_name: *mut ::c_char, + pub pw_passwd: *mut ::c_char, + pub pw_uid: ::uid_t, + pub pw_gid: ::gid_t, + pub pw_dir: *mut ::c_char, + pub pw_shell: *mut ::c_char, + pub pw_gecos: *mut ::c_char, + } + + pub struct statvfs { + pub f_bsize: ::c_ulong, + pub f_frsize: ::c_ulong, + pub f_blocks: ::fsblkcnt_t, + pub f_bfree: ::fsblkcnt_t, + pub f_bavail: ::fsblkcnt_t, + pub f_files: ::fsfilcnt_t, + pub f_ffree: ::fsfilcnt_t, + pub f_favail: ::fsfilcnt_t, + pub f_fsid: ::c_ulong, + pub f_flag: ::c_ulong, + pub f_namemax: ::c_ulong, + } + + pub struct stack_t { + pub ss_sp: *mut ::c_void, + pub ss_size: ::size_t, + pub ss_flags: ::c_int, + } + + pub struct siginfo_t { + pub si_signo: ::c_int, + pub si_code: ::c_int, + pub si_errno: ::c_int, + pub si_pid: ::pid_t, + pub si_uid: ::uid_t, + pub si_addr: *mut ::c_void, + pub si_status: ::c_int, + pub si_band: c_long, + pub sigval: *mut ::c_void, + } + + pub struct sigaction { + pub sa_sigaction: ::sighandler_t, + pub sa_mask: ::sigset_t, + pub sa_flags: ::c_int, + sa_userdata: *mut ::c_void, + } + + pub struct sem_t { + pub se_type: i32, + pub se_named_id: i32, // this is actually a union + pub se_unnamed: i32, + pub se_padding: [i32; 4], + } + + pub struct pthread_condattr_t { + pub process_shared: bool, + pub clock_id: i32, + } +} + +// intentionally not public, only used for fd_set +cfg_if! 
{ + if #[cfg(target_pointer_width = "32")] { + const ULONG_SIZE: usize = 32; + } else if #[cfg(target_pointer_width = "64")] { + const ULONG_SIZE: usize = 64; + } else { + // Unknown target_pointer_width + } +} + +pub const EXIT_FAILURE: ::c_int = 1; +pub const EXIT_SUCCESS: ::c_int = 0; +pub const RAND_MAX: ::c_int = 2147483647; +pub const EOF: ::c_int = -1; +pub const SEEK_SET: ::c_int = 0; +pub const SEEK_CUR: ::c_int = 1; +pub const SEEK_END: ::c_int = 2; +pub const _IOFBF: ::c_int = 0; +pub const _IONBF: ::c_int = 2; +pub const _IOLBF: ::c_int = 1; + +pub const F_DUPFD: ::c_int = 0x0001; +pub const F_GETFD: ::c_int = 0x0002; +pub const F_SETFD: ::c_int = 0x0004; +pub const F_GETFL: ::c_int = 0x0008; +pub const F_SETFL: ::c_int = 0x0010; + +pub const SIGTRAP: ::c_int = 22; + +pub const PTHREAD_CREATE_JOINABLE: ::c_int = 0; +pub const PTHREAD_CREATE_DETACHED: ::c_int = 1; + +pub const CLOCK_REALTIME: ::c_int = -1; +pub const CLOCK_MONOTONIC: ::c_int = 0; + +pub const RLIMIT_CORE: ::c_int = 0; +pub const RLIMIT_CPU: ::c_int = 1; +pub const RLIMIT_DATA: ::c_int = 2; +pub const RLIMIT_FSIZE: ::c_int = 3; +pub const RLIMIT_NOFILE: ::c_int = 4; +pub const RLIMIT_AS: ::c_int = 6; +// Haiku specific +pub const RLIMIT_NOVMON: ::c_int = 7; +pub const RLIMIT_NLIMITS: ::c_int = 8; + +pub const RUSAGE_SELF: ::c_int = 0; + +pub const NCCS: usize = 11; + +pub const O_RDONLY: ::c_int = 0x0000; +pub const O_WRONLY: ::c_int = 0x0001; +pub const O_RDWR: ::c_int = 0x0002; +pub const O_ACCMODE: ::c_int = 0x0003; + +pub const O_EXCL: ::c_int = 0x0100; +pub const O_CREAT: ::c_int = 0x0200; +pub const O_TRUNC: ::c_int = 0x0400; +pub const O_NOCTTY: ::c_int = 0x1000; +pub const O_NOTRAVERSE: ::c_int = 0x2000; + +pub const O_CLOEXEC: ::c_int = 0x00000040; +pub const O_NONBLOCK: ::c_int = 0x00000080; +pub const O_APPEND: ::c_int = 0x00000800; +pub const O_SYNC: ::c_int = 0x00010000; +pub const O_RSYNC: ::c_int = 0x00020000; +pub const O_DSYNC: ::c_int = 0x00040000; +pub const O_NOFOLLOW: ::c_int = 0x00080000; +pub const O_NOCACHE: ::c_int = 0x00100000; +pub const O_DIRECTORY: ::c_int = 0x00200000; + +pub const S_IFIFO: ::mode_t = 61440; +pub const S_IFCHR: ::mode_t = 49152; +pub const S_IFBLK: ::mode_t = 24576; +pub const S_IFDIR: ::mode_t = 16384; +pub const S_IFREG: ::mode_t = 32768; +pub const S_IFLNK: ::mode_t = 40960; +pub const S_IFSOCK: ::mode_t = 49152; +pub const S_IFMT: ::mode_t = 61440; +pub const S_IRWXU: ::mode_t = 448; +pub const S_IXUSR: ::mode_t = 64; +pub const S_IWUSR: ::mode_t = 128; +pub const S_IRUSR: ::mode_t = 256; +pub const S_IRWXG: ::mode_t = 70; +pub const S_IXGRP: ::mode_t = 10; +pub const S_IWGRP: ::mode_t = 20; +pub const S_IRGRP: ::mode_t = 40; +pub const S_IRWXO: ::mode_t = 7; +pub const S_IXOTH: ::mode_t = 1; +pub const S_IWOTH: ::mode_t = 2; +pub const S_IROTH: ::mode_t = 4; +pub const F_OK: ::c_int = 0; +pub const R_OK: ::c_int = 4; +pub const W_OK: ::c_int = 2; +pub const X_OK: ::c_int = 1; +pub const STDIN_FILENO: ::c_int = 0; +pub const STDOUT_FILENO: ::c_int = 1; +pub const STDERR_FILENO: ::c_int = 2; +pub const SIGHUP: ::c_int = 1; +pub const SIGINT: ::c_int = 2; +pub const SIGQUIT: ::c_int = 3; +pub const SIGILL: ::c_int = 4; +pub const SIGABRT: ::c_int = 6; +pub const SIGFPE: ::c_int = 8; +pub const SIGKILL: ::c_int = 9; +pub const SIGSEGV: ::c_int = 11; +pub const SIGPIPE: ::c_int = 7; +pub const SIGALRM: ::c_int = 14; +pub const SIGTERM: ::c_int = 15; + +pub const EAI_SYSTEM: ::c_int = 11; + +pub const PROT_NONE: ::c_int = 0; +pub const PROT_READ: ::c_int = 1; +pub 
const PROT_WRITE: ::c_int = 2; +pub const PROT_EXEC: ::c_int = 4; + +pub const LC_ALL: ::c_int = 0; +pub const LC_COLLATE: ::c_int = 1; +pub const LC_CTYPE: ::c_int = 2; +pub const LC_MONETARY: ::c_int = 3; +pub const LC_NUMERIC: ::c_int = 4; +pub const LC_TIME: ::c_int = 5; +pub const LC_MESSAGES: ::c_int = 6; + +// TODO: Haiku does not have MAP_FILE, but libstd/os.rs requires it +pub const MAP_FILE: ::c_int = 0x00; +pub const MAP_SHARED: ::c_int = 0x01; +pub const MAP_PRIVATE: ::c_int = 0x02; +pub const MAP_FIXED: ::c_int = 0x004; + +pub const MAP_FAILED: *mut ::c_void = !0 as *mut ::c_void; + +pub const MS_ASYNC: ::c_int = 0x01; +pub const MS_INVALIDATE: ::c_int = 0x04; +pub const MS_SYNC: ::c_int = 0x02; + +pub const EPERM : ::c_int = -2147483633; +pub const ENOENT : ::c_int = -2147459069; +pub const ESRCH : ::c_int = -2147454963; +pub const EINTR : ::c_int = -2147483638; +pub const EIO : ::c_int = -2147483647; +pub const ENXIO : ::c_int = -2147454965; +pub const E2BIG : ::c_int = -2147454975; +pub const ENOEXEC : ::c_int = -2147478782; +pub const EBADF : ::c_int = -2147459072; +pub const ECHILD : ::c_int = -2147454974; +pub const EDEADLK : ::c_int = -2147454973; +pub const ENOMEM : ::c_int = -2147454976; +pub const EACCES : ::c_int = -2147483646; +pub const EFAULT : ::c_int = -2147478783; +// pub const ENOTBLK : ::c_int = 15; +pub const EBUSY : ::c_int = -2147483634; +pub const EEXIST : ::c_int = -2147459070; +pub const EXDEV : ::c_int = -2147459061; +pub const ENODEV : ::c_int = -2147454969; +pub const ENOTDIR : ::c_int = -2147459067; +pub const EISDIR : ::c_int = -2147459063; +pub const EINVAL : ::c_int = -2147483643; +pub const ENFILE : ::c_int = -2147454970; +pub const EMFILE : ::c_int = -2147459062; +pub const ENOTTY : ::c_int = -2147454966; +pub const ETXTBSY : ::c_int = -2147454917; +pub const EFBIG : ::c_int = -2147454972; +pub const ENOSPC : ::c_int = -2147459065; +pub const ESPIPE : ::c_int = -2147454964; +pub const EROFS : ::c_int = -2147459064; +pub const EMLINK : ::c_int = -2147454971; +pub const EPIPE : ::c_int = -2147459059; +pub const EDOM : ::c_int = -2147454960; +pub const ERANGE : ::c_int = -2147454959; +pub const EAGAIN : ::c_int = -2147483637; +pub const EWOULDBLOCK : ::c_int = -2147483637; + +pub const EINPROGRESS : ::c_int = -2147454940; +pub const EALREADY : ::c_int = -2147454939; +pub const ENOTSOCK : ::c_int = -2147454932; +pub const EDESTADDRREQ : ::c_int = -2147454928; +pub const EMSGSIZE : ::c_int = -2147454934; +pub const EPROTOTYPE : ::c_int = -2147454958; +pub const ENOPROTOOPT : ::c_int = -2147454942; +pub const EPROTONOSUPPORT : ::c_int = -2147454957; +pub const EOPNOTSUPP : ::c_int = -2147454933; +pub const EPFNOSUPPORT : ::c_int = -2147454956; +pub const EAFNOSUPPORT : ::c_int = -2147454955; +pub const EADDRINUSE : ::c_int = -2147454954; +pub const EADDRNOTAVAIL : ::c_int = -2147454953; +pub const ENETDOWN : ::c_int = -2147454953; +pub const ENETUNREACH : ::c_int = -2147454951; +pub const ENETRESET : ::c_int = -2147454950; +pub const ECONNABORTED : ::c_int = -2147454949; +pub const ECONNRESET : ::c_int = -2147454948; +pub const ENOBUFS : ::c_int = -2147454941; +pub const EISCONN : ::c_int = -2147454947; +pub const ENOTCONN : ::c_int = -2147454946; +pub const ESHUTDOWN : ::c_int = -2147454945; +pub const ETIMEDOUT : ::c_int = -2147483639; +pub const ECONNREFUSED : ::c_int = -2147454944; +pub const ELOOP : ::c_int = -2147459060; +pub const ENAMETOOLONG : ::c_int = -2147459068; +pub const EHOSTDOWN : ::c_int = -2147454931; +pub const EHOSTUNREACH : 
::c_int = -2147454943; +pub const ENOTEMPTY : ::c_int = -2147459066; +pub const EDQUOT : ::c_int = -2147454927; +pub const ESTALE : ::c_int = -2147454936; +pub const ENOLCK : ::c_int = -2147454968; +pub const ENOSYS : ::c_int = -2147454967; +pub const EIDRM : ::c_int = -2147454926; +pub const ENOMSG : ::c_int = -2147454937; +pub const EOVERFLOW : ::c_int = -2147454935; +pub const ECANCELED : ::c_int = -2147454929; +pub const EILSEQ : ::c_int = -2147454938; +pub const ENOATTR : ::c_int = -2147454916; +pub const EBADMSG : ::c_int = -2147454930; +pub const EMULTIHOP : ::c_int = -2147454925; +pub const ENOLINK : ::c_int = -2147454923; +pub const EPROTO : ::c_int = -2147454919; + +pub const IPPROTO_RAW: ::c_int = 255; + +// These are prefixed with POSIX_ on Haiku +pub const MADV_NORMAL: ::c_int = 1; +pub const MADV_SEQUENTIAL: ::c_int = 2; +pub const MADV_RANDOM: ::c_int = 3; +pub const MADV_WILLNEED: ::c_int = 4; +pub const MADV_DONTNEED: ::c_int = 5; + +pub const IFF_LOOPBACK: ::c_int = 0x0008; + +pub const AF_UNIX: ::c_int = 9; +pub const AF_INET: ::c_int = 1; +pub const AF_INET6: ::c_int = 6; +pub const SOCK_RAW: ::c_int = 3; +pub const IPPROTO_TCP: ::c_int = 6; +pub const IPPROTO_IP: ::c_int = 0; +pub const IPPROTO_IPV6: ::c_int = 41; +pub const IP_MULTICAST_TTL: ::c_int = 10; +pub const IP_MULTICAST_LOOP: ::c_int = 11; +pub const IP_TTL: ::c_int = 4; +pub const IP_HDRINCL: ::c_int = 2; +pub const IP_ADD_MEMBERSHIP: ::c_int = 12; +pub const IP_DROP_MEMBERSHIP: ::c_int = 13; + +pub const TCP_NODELAY: ::c_int = 0x01; +pub const TCP_MAXSEG: ::c_int = 0x02; +pub const TCP_NOPUSH: ::c_int = 0x04; +pub const TCP_NOOPT: ::c_int = 0x08; + +pub const IPV6_MULTICAST_LOOP: ::c_int = 26; +pub const IPV6_JOIN_GROUP: ::c_int = 28; +pub const IPV6_LEAVE_GROUP: ::c_int = 29; +pub const IPV6_V6ONLY: ::c_int = 30; + +pub const SO_DEBUG: ::c_int = 0x00000004; + +pub const MSG_NOSIGNAL: ::c_int = 0x0800; + +pub const SHUT_RD: ::c_int = 0; +pub const SHUT_WR: ::c_int = 1; +pub const SHUT_RDWR: ::c_int = 2; + +pub const LOCK_SH: ::c_int = 0x01; +pub const LOCK_EX: ::c_int = 0x02; +pub const LOCK_NB: ::c_int = 0x04; +pub const LOCK_UN: ::c_int = 0x08; + +pub const SIGSTKSZ: ::size_t = 16384; + +pub const SA_NODEFER: ::c_int = 0x08; +pub const SA_RESETHAND: ::c_int = 0x04; +pub const SA_RESTART: ::c_int = 0x10; +pub const SA_NOCLDSTOP: ::c_int = 0x01; + +pub const FD_SETSIZE: usize = 1024; + +pub const RTLD_NOW: ::c_int = 0x1; +pub const RTLD_DEFAULT: *mut ::c_void = 0isize as *mut ::c_void; + +pub const BUFSIZ: ::c_uint = 8192; +pub const FILENAME_MAX: ::c_uint = 256; +pub const FOPEN_MAX: ::c_uint = 128; +pub const L_tmpnam: ::c_uint = 512; +pub const TMP_MAX: ::c_uint = 32768; +pub const _PC_NAME_MAX: ::c_int = 4; + +pub const FIONBIO: ::c_int = 0xbe000000; + +pub const _SC_IOV_MAX : ::c_int = 32; +pub const _SC_GETGR_R_SIZE_MAX : ::c_int = 25; +pub const _SC_GETPW_R_SIZE_MAX : ::c_int = 26; +pub const _SC_PAGESIZE : ::c_int = 27; +pub const _SC_THREAD_ATTR_STACKADDR : ::c_int = 48; +pub const _SC_THREAD_ATTR_STACKSIZE : ::c_int = 49; +pub const _SC_THREAD_PRIORITY_SCHEDULING : ::c_int = 50; +pub const _SC_THREAD_PROCESS_SHARED : ::c_int = 46; +pub const _SC_THREAD_STACK_MIN : ::c_int = 47; +pub const _SC_THREADS : ::c_int = 31; +pub const _SC_ATEXIT_MAX : ::c_int = 37; + +pub const PTHREAD_STACK_MIN: ::size_t = 8192; + +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + flags: 0, + lock: 0, + unused: -42, + owner: -1, + owner_count: 0, +}; +pub const PTHREAD_COND_INITIALIZER: 
pthread_cond_t = pthread_cond_t { + flags: 0, + unused: -42, + mutex: 0 as *mut _, + waiter_count: 0, + lock: 0, +}; +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + flags: 0, + owner: 0, + lock_sem: 0, + lock_count: 0, + reader_count: 0, + writer_count: 0, + waiters: [0 as *mut _; 2], +}; + +pub const PTHREAD_MUTEX_DEFAULT: ::c_int = 0; +pub const PTHREAD_MUTEX_NORMAL: ::c_int = 1; +pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 2; +pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 3; + +pub const FIOCLEX: c_ulong = 0; // TODO: does not exist on Haiku! + +pub const SA_ONSTACK: c_ulong = 0x20; +pub const SA_SIGINFO: c_ulong = 0x40; +pub const SA_NOCLDWAIT: c_ulong = 0x02; + +pub const SIGCHLD: ::c_int = 5; +pub const SIGBUS: ::c_int = 30; +pub const SIG_SETMASK: ::c_int = 3; + +pub const RUSAGE_CHILDREN: ::c_int = -1; + +pub const SOCK_STREAM: ::c_int = 1; +pub const SOCK_DGRAM: ::c_int = 2; + +pub const SOL_SOCKET: ::c_int = -1; +pub const SO_ACCEPTCONN: ::c_int = 0x00000001; +pub const SO_BROADCAST: ::c_int = 0x00000002; +pub const SO_DONTROUTE: ::c_int = 0x00000008; +pub const SO_KEEPALIVE: ::c_int = 0x00000010; +pub const SO_OOBINLINE: ::c_int = 0x00000020; +pub const SO_REUSEADDR: ::c_int = 0x00000040; +pub const SO_REUSEPORT: ::c_int = 0x00000080; +pub const SO_USELOOPBACK: ::c_int = 0x00000100; +pub const SO_LINGER: ::c_int = 0x00000200; +pub const SO_SNDBUF: ::c_int = 0x40000001; +pub const SO_SNDLOWAT: ::c_int = 0x40000002; +pub const SO_SNDTIMEO: ::c_int = 0x40000003; +pub const SO_RCVBUF: ::c_int = 0x40000004; +pub const SO_RCVLOWAT: ::c_int = 0x40000005; +pub const SO_RCVTIMEO: ::c_int = 0x40000006; +pub const SO_ERROR: ::c_int = 0x40000007; +pub const SO_TYPE: ::c_int = 0x40000008; +pub const SO_NONBLOCK: ::c_int = 0x40000009; +pub const SO_BINDTODEVICE: ::c_int = 0x4000000a; +pub const SO_PEERCRED: ::c_int = 0x4000000b; + +pub const NI_MAXHOST: ::size_t = 1025; + +f! 
{ + pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () { + let fd = fd as usize; + let size = mem::size_of_val(&(*set).fds_bits[0]) * 8; + (*set).fds_bits[fd / size] &= !(1 << (fd % size)); + return + } + + pub fn FD_ISSET(fd: ::c_int, set: *mut fd_set) -> bool { + let fd = fd as usize; + let size = mem::size_of_val(&(*set).fds_bits[0]) * 8; + return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0 + } + + pub fn FD_SET(fd: ::c_int, set: *mut fd_set) -> () { + let fd = fd as usize; + let size = mem::size_of_val(&(*set).fds_bits[0]) * 8; + (*set).fds_bits[fd / size] |= 1 << (fd % size); + return + } + + pub fn FD_ZERO(set: *mut fd_set) -> () { + for slot in (*set).fds_bits.iter_mut() { + *slot = 0; + } + } + + pub fn WIFEXITED(status: ::c_int) -> bool { + (status >> 8) == 0 + } + + pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { + (status & 0xff) + } + + pub fn WTERMSIG(status: ::c_int) -> ::c_int { + (status >> 8) & 0xff + } +} + +extern { + pub fn clock_gettime(clk_id: ::c_int, tp: *mut ::timespec) -> ::c_int; + pub fn pthread_attr_getguardsize(attr: *const ::pthread_attr_t, + guardsize: *mut ::size_t) -> ::c_int; + pub fn pthread_attr_getstack(attr: *const ::pthread_attr_t, + stackaddr: *mut *mut ::c_void, + stacksize: *mut ::size_t) -> ::c_int; + pub fn pthread_condattr_getclock(attr: *const pthread_condattr_t, + clock_id: *mut clockid_t) -> ::c_int; + pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, + clock_id: clockid_t) -> ::c_int; + pub fn memalign(align: ::size_t, size: ::size_t) -> *mut ::c_void; + pub fn setgroups(ngroups: ::size_t, + ptr: *const ::gid_t) -> ::c_int; + pub fn getpwuid_r(uid: ::uid_t, + pwd: *mut passwd, + buffer: *mut ::c_char, + bufferSize: ::size_t, + result: *mut *mut passwd) -> ::c_int; + pub fn ioctl(fd: ::c_int, request: ::c_int, ...) -> ::c_int; + pub fn mprotect(addr: *const ::c_void, len: ::size_t, prot: ::c_int) + -> ::c_int; + pub fn getnameinfo(sa: *const ::sockaddr, + salen: ::socklen_t, + host: *mut ::c_char, + hostlen: ::size_t, + serv: *mut ::c_char, + sevlen: ::size_t, + flags: ::c_int) -> ::c_int; + pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, + abstime: *const ::timespec) -> ::c_int; +} + +cfg_if! { + if #[cfg(target_pointer_width = "64")] { + mod b64; + pub use self::b64::*; + } else { + mod b32; + pub use self::b32::*; + } +} diff --git a/src/liblibc/src/unix/mod.rs b/src/liblibc/src/unix/mod.rs index 314219e78c..e822b063d9 100644 --- a/src/liblibc/src/unix/mod.rs +++ b/src/liblibc/src/unix/mod.rs @@ -17,6 +17,13 @@ pub enum DIR {} pub enum locale_t {} s! { + pub struct group { + pub gr_name: *mut ::c_char, + pub gr_passwd: *mut ::c_char, + pub gr_gid: ::gid_t, + pub gr_mem: *mut *mut ::c_char, + } + pub struct utimbuf { pub actime: time_t, pub modtime: time_t, @@ -217,6 +224,10 @@ cfg_if! { #[link(name = "c")] #[link(name = "m")] extern {} + } else if #[cfg(target_os = "haiku")] { + #[link(name = "root")] + #[link(name = "network")] + extern {} } else { #[link(name = "c")] #[link(name = "m")] @@ -226,6 +237,15 @@ cfg_if! { } extern { + pub fn getgrnam(name: *const ::c_char) -> *mut group; + pub fn getgrgid(gid: ::gid_t) -> *mut group; + + pub fn endpwent(); + #[cfg_attr(target_os = "netbsd", link_name = "__getpwnam50")] + pub fn getpwnam(name: *const ::c_char) -> *mut passwd; + #[cfg_attr(target_os = "netbsd", link_name = "__getpwuid50")] + pub fn getpwuid(uid: ::uid_t) -> *mut passwd; + pub fn fprintf(stream: *mut ::FILE, format: *const ::c_char, ...) 
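
The `f!` block just above generates Haiku's `FD_*` helpers over its 1024-bit `fd_set`, along with the `WIFEXITED`/`WEXITSTATUS`/`WTERMSIG` status decoders. A short illustrative sketch of the `fd_set` helpers, using only items defined in this module:

    extern crate libc;

    use std::mem;

    fn main() {
        unsafe {
            // Start from an all-zero set, mark stdin, query it, then clear it.
            let mut set: libc::fd_set = mem::zeroed();
            libc::FD_ZERO(&mut set);
            libc::FD_SET(libc::STDIN_FILENO, &mut set);
            assert!(libc::FD_ISSET(libc::STDIN_FILENO, &mut set));
            libc::FD_CLR(libc::STDIN_FILENO, &mut set);
            assert!(!libc::FD_ISSET(libc::STDIN_FILENO, &mut set));
        }
    }

In practice the populated set would be handed to `select(2)`, whose binding lives in the shared unix module rather than in this hunk.
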
-> ::c_int; pub fn printf(format: *const ::c_char, ...) -> ::c_int; @@ -235,6 +255,8 @@ extern { pub fn fscanf(stream: *mut ::FILE, format: *const ::c_char, ...) -> ::c_int; pub fn scanf(format: *const ::c_char, ...) -> ::c_int; pub fn sscanf(s: *const ::c_char, format: *const ::c_char, ...) -> ::c_int; + pub fn getchar_unlocked() -> ::c_int; + pub fn putchar_unlocked(c: ::c_int) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__socket30")] pub fn socket(domain: ::c_int, ty: ::c_int, protocol: ::c_int) -> ::c_int; @@ -365,6 +387,7 @@ extern { link_name = "getopt$UNIX2003")] pub fn getopt(argc: ::c_int, argv: *const *mut c_char, optstr: *const c_char) -> ::c_int; + pub fn getpgid(pid: pid_t) -> pid_t; pub fn getpgrp() -> pid_t; pub fn getpid() -> pid_t; pub fn getppid() -> pid_t; @@ -626,6 +649,10 @@ extern { link_name = "mktime$UNIX2003")] #[cfg_attr(target_os = "netbsd", link_name = "__mktime50")] pub fn mktime(tm: *mut tm) -> time_t; + #[cfg_attr(target_os = "netbsd", link_name = "__time50")] + pub fn time(time: *mut time_t) -> time_t; + #[cfg_attr(target_os = "netbsd", link_name = "__locatime50")] + pub fn localtime(time: *const time_t) -> *mut tm; #[cfg_attr(target_os = "netbsd", link_name = "__mknod50")] pub fn mknod(pathname: *const ::c_char, mode: ::mode_t, @@ -703,8 +730,6 @@ extern { // TODO: get rid of this cfg(not(...)) #[cfg(not(target_os = "android"))] // " if " -- appease style checker extern { - pub fn getifaddrs(ifap: *mut *mut ifaddrs) -> ::c_int; - pub fn freeifaddrs(ifa: *mut ifaddrs); #[cfg_attr(target_os = "macos", link_name = "glob$INODE64")] #[cfg_attr(target_os = "netbsd", link_name = "__glob30")] pub fn glob(pattern: *const c_char, @@ -828,6 +853,9 @@ cfg_if! { } else if #[cfg(target_os = "solaris")] { mod solaris; pub use self::solaris::*; + } else if #[cfg(target_os = "haiku")] { + mod haiku; + pub use self::haiku::*; } else { // Unknown target_os } diff --git a/src/liblibc/src/unix/notbsd/android/b32.rs b/src/liblibc/src/unix/notbsd/android/b32.rs index 91a56a3fca..bd69ccf37d 100644 --- a/src/liblibc/src/unix/notbsd/android/b32.rs +++ b/src/liblibc/src/unix/notbsd/android/b32.rs @@ -142,6 +142,10 @@ pub const PTHREAD_STACK_MIN: ::size_t = 4096 * 2; pub const CPU_SETSIZE: ::size_t = 32; pub const __CPU_BITS: ::size_t = 32; +pub const UT_LINESIZE: usize = 8; +pub const UT_NAMESIZE: usize = 8; +pub const UT_HOSTSIZE: usize = 16; + extern { pub fn timegm64(tm: *const ::tm) -> ::time64_t; } diff --git a/src/liblibc/src/unix/notbsd/android/b64.rs b/src/liblibc/src/unix/notbsd/android/b64.rs index 025dabd458..b35dde4216 100644 --- a/src/liblibc/src/unix/notbsd/android/b64.rs +++ b/src/liblibc/src/unix/notbsd/android/b64.rs @@ -152,6 +152,10 @@ pub const PTHREAD_STACK_MIN: ::size_t = 4096 * 4; pub const CPU_SETSIZE: ::size_t = 1024; pub const __CPU_BITS: ::size_t = 64; +pub const UT_LINESIZE: usize = 32; +pub const UT_NAMESIZE: usize = 32; +pub const UT_HOSTSIZE: usize = 256; + extern { pub fn timegm(tm: *const ::tm) -> ::time64_t; } diff --git a/src/liblibc/src/unix/notbsd/android/mod.rs b/src/liblibc/src/unix/notbsd/android/mod.rs index 5dbd39e9cf..efc136817d 100644 --- a/src/liblibc/src/unix/notbsd/android/mod.rs +++ b/src/liblibc/src/unix/notbsd/android/mod.rs @@ -1,5 +1,7 @@ //! Android-specific definitions for linux-like values +use dox::mem; + pub type c_char = u8; pub type clock_t = ::c_long; pub type time_t = ::c_long; @@ -100,11 +102,42 @@ s! 
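
Together with the new `group` struct, the `getgrnam`/`getgrgid` and `getpwnam`/`getpwuid` bindings added to the shared unix module above give a simple way to resolve numeric IDs to names. A rough sketch, assuming `getuid` (declared elsewhere in the crate) and NUL-terminated name fields:

    extern crate libc;

    use std::ffi::CStr;

    // Resolve the calling user's login name and primary group name.
    fn whoami() -> Option<(String, String)> {
        unsafe {
            let pw = libc::getpwuid(libc::getuid()); // *mut passwd, NULL on error
            if pw.is_null() {
                return None;
            }
            let gr = libc::getgrgid((*pw).pw_gid);   // *mut group, NULL on error
            if gr.is_null() {
                return None;
            }
            let user = CStr::from_ptr((*pw).pw_name).to_string_lossy().into_owned();
            let group = CStr::from_ptr((*gr).gr_name).to_string_lossy().into_owned();
            Some((user, group))
        }
    }
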
{ pub struct sem_t { count: ::c_uint, } + + pub struct lastlog { + ll_time: ::time_t, + ll_line: [::c_char; UT_LINESIZE], + ll_host: [::c_char; UT_HOSTSIZE], + } + + pub struct exit_status { + pub e_termination: ::c_short, + pub e_exit: ::c_short, + } + + pub struct utmp { + pub ut_type: ::c_short, + pub ut_pid: ::pid_t, + pub ut_line: [::c_char; UT_LINESIZE], + pub ut_id: [::c_char; 4], + + pub ut_user: [::c_char; UT_NAMESIZE], + pub ut_host: [::c_char; UT_HOSTSIZE], + pub ut_exit: exit_status, + pub ut_session: ::c_long, + pub ut_tv: ::timeval, + + pub ut_addr_v6: [::int32_t; 4], + unused: [::c_char; 20], + } } +pub const USER_PROCESS: ::c_short = 7; + pub const BUFSIZ: ::c_uint = 1024; pub const FILENAME_MAX: ::c_uint = 1024; pub const FOPEN_MAX: ::c_uint = 20; +pub const POSIX_FADV_DONTNEED: ::c_int = 4; +pub const POSIX_FADV_NOREUSE: ::c_int = 5; pub const L_tmpnam: ::c_uint = 1024; pub const TMP_MAX: ::c_uint = 308915776; pub const _PC_LINK_MAX: ::c_int = 1; @@ -502,6 +535,112 @@ pub const LINUX_REBOOT_CMD_RESTART2: ::c_int = 0xA1B2C3D4; pub const LINUX_REBOOT_CMD_SW_SUSPEND: ::c_int = 0xD000FCE2; pub const LINUX_REBOOT_CMD_KEXEC: ::c_int = 0x45584543; +pub const MCL_CURRENT: ::c_int = 0x0001; +pub const MCL_FUTURE: ::c_int = 0x0002; + +pub const SIGSTKSZ: ::size_t = 8192; +pub const CBAUD: ::tcflag_t = 0o0010017; +pub const TAB1: ::c_int = 0x00000800; +pub const TAB2: ::c_int = 0x00001000; +pub const TAB3: ::c_int = 0x00001800; +pub const CR1: ::c_int = 0x00000200; +pub const CR2: ::c_int = 0x00000400; +pub const CR3: ::c_int = 0x00000600; +pub const FF1: ::c_int = 0x00008000; +pub const BS1: ::c_int = 0x00002000; +pub const VT1: ::c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: ::tcflag_t = 0x00000400; +pub const IXOFF: ::tcflag_t = 0x00001000; +pub const ONLCR: ::tcflag_t = 0x4; +pub const CSIZE: ::tcflag_t = 0x00000030; +pub const CS6: ::tcflag_t = 0x00000010; +pub const CS7: ::tcflag_t = 0x00000020; +pub const CS8: ::tcflag_t = 0x00000030; +pub const CSTOPB: ::tcflag_t = 0x00000040; +pub const CREAD: ::tcflag_t = 0x00000080; +pub const PARENB: ::tcflag_t = 0x00000100; +pub const PARODD: ::tcflag_t = 0x00000200; +pub const HUPCL: ::tcflag_t = 0x00000400; +pub const CLOCAL: ::tcflag_t = 0x00000800; +pub const ECHOKE: ::tcflag_t = 0x00000800; +pub const ECHOE: ::tcflag_t = 0x00000010; +pub const ECHOK: ::tcflag_t = 0x00000020; +pub const ECHONL: ::tcflag_t = 0x00000040; +pub const ECHOPRT: ::tcflag_t = 0x00000400; +pub const ECHOCTL: ::tcflag_t = 0x00000200; +pub const ISIG: ::tcflag_t = 0x00000001; +pub const ICANON: ::tcflag_t = 0x00000002; +pub const PENDIN: ::tcflag_t = 0x00004000; +pub const NOFLSH: ::tcflag_t = 0x00000080; + +pub const EAI_SYSTEM: ::c_int = 11; + +pub const NETLINK_ROUTE: ::c_int = 0; +pub const NETLINK_UNUSED: ::c_int = 1; +pub const NETLINK_USERSOCK: ::c_int = 2; +pub const NETLINK_FIREWALL: ::c_int = 3; +pub const NETLINK_SOCK_DIAG: ::c_int = 4; +pub const NETLINK_NFLOG: ::c_int = 5; +pub const NETLINK_XFRM: ::c_int = 6; +pub const NETLINK_SELINUX: ::c_int = 7; +pub const NETLINK_ISCSI: ::c_int = 8; +pub const NETLINK_AUDIT: ::c_int = 9; +pub const NETLINK_FIB_LOOKUP: ::c_int = 10; +pub const NETLINK_CONNECTOR: ::c_int = 11; +pub const NETLINK_NETFILTER: ::c_int = 12; +pub const NETLINK_IP6_FW: ::c_int = 13; +pub const NETLINK_DNRTMSG: ::c_int = 14; +pub 
const NETLINK_KOBJECT_UEVENT: ::c_int = 15; +pub const NETLINK_GENERIC: ::c_int = 16; +pub const NETLINK_SCSITRANSPORT: ::c_int = 18; +pub const NETLINK_ECRYPTFS: ::c_int = 19; +pub const NETLINK_RDMA: ::c_int = 20; +pub const NETLINK_CRYPTO: ::c_int = 21; +pub const NETLINK_INET_DIAG: ::c_int = NETLINK_SOCK_DIAG; + +pub const MAX_LINKS: ::c_int = 32; + +pub const NLM_F_REQUEST: ::c_int = 1; +pub const NLM_F_MULTI: ::c_int = 2; +pub const NLM_F_ACK: ::c_int = 4; +pub const NLM_F_ECHO: ::c_int = 8; +pub const NLM_F_DUMP_INTR: ::c_int = 16; + +pub const NLM_F_ROOT: ::c_int = 0x100; +pub const NLM_F_MATCH: ::c_int = 0x200; +pub const NLM_F_ATOMIC: ::c_int = 0x400; +pub const NLM_F_DUMP: ::c_int = NLM_F_ROOT | NLM_F_MATCH; + +pub const NLM_F_REPLACE: ::c_int = 0x100; +pub const NLM_F_EXCL: ::c_int = 0x200; +pub const NLM_F_CREATE: ::c_int = 0x400; +pub const NLM_F_APPEND: ::c_int = 0x800; + +pub const NLMSG_NOOP: ::c_int = 0x1; +pub const NLMSG_ERROR: ::c_int = 0x2; +pub const NLMSG_DONE: ::c_int = 0x3; +pub const NLMSG_OVERRUN: ::c_int = 0x4; +pub const NLMSG_MIN_TYPE: ::c_int = 0x10; + +pub const NETLINK_ADD_MEMBERSHIP: ::c_int = 1; +pub const NETLINK_DROP_MEMBERSHIP: ::c_int = 2; +pub const NETLINK_PKTINFO: ::c_int = 3; +pub const NETLINK_BROADCAST_ERROR: ::c_int = 4; +pub const NETLINK_NO_ENOBUFS: ::c_int = 5; +pub const NETLINK_RX_RING: ::c_int = 6; +pub const NETLINK_TX_RING: ::c_int = 7; + +pub const NLA_F_NESTED: ::c_int = 1 << 15; +pub const NLA_F_NET_BYTEORDER: ::c_int = 1 << 14; +pub const NLA_TYPE_MASK: ::c_int = !(NLA_F_NESTED | NLA_F_NET_BYTEORDER); + f! { pub fn sigemptyset(set: *mut sigset_t) -> ::c_int { *set = 0; @@ -555,6 +694,36 @@ f! { pub fn tcsendbreak(fd: ::c_int, duration: ::c_int) -> ::c_int { ioctl(fd, TCSBRKP, duration as *mut ::c_void) } + + pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { + for slot in cpuset.__bits.iter_mut() { + *slot = 0; + } + } + + pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () { + let size_in___bits = 8 * mem::size_of_val(&cpuset.__bits[0]); + let (idx, offset) = (cpu / size_in___bits, cpu % size_in___bits); + cpuset.__bits[idx] |= 1 << offset; + () + } + + pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () { + let size_in___bits = 8 * mem::size_of_val(&cpuset.__bits[0]); + let (idx, offset) = (cpu / size_in___bits, cpu % size_in___bits); + cpuset.__bits[idx] &= !(1 << offset); + () + } + + pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool { + let size_in___bits = 8 * mem::size_of_val(&cpuset.__bits[0]); + let (idx, offset) = (cpu / size_in___bits, cpu % size_in___bits); + 0 != (cpuset.__bits[idx] & (1 << offset)) + } + + pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool { + set1.__bits == set2.__bits + } } extern { @@ -591,6 +760,10 @@ extern { pub fn __sched_cpufree(set: *mut ::cpu_set_t); pub fn __sched_cpucount(setsize: ::size_t, set: *mut cpu_set_t) -> ::c_int; pub fn sched_getcpu() -> ::c_int; + + pub fn utmpname(name: *const ::c_char) -> ::c_int; + pub fn setutent(); + pub fn getutent() -> *mut utmp; } cfg_if! { diff --git a/src/liblibc/src/unix/notbsd/linux/mips.rs b/src/liblibc/src/unix/notbsd/linux/mips.rs index 6464adce77..085f2b31ce 100644 --- a/src/liblibc/src/unix/notbsd/linux/mips.rs +++ b/src/liblibc/src/unix/notbsd/linux/mips.rs @@ -129,6 +129,32 @@ s! 
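
The Android additions above bind the classic `utmp` database: the `utmp` record, `USER_PROCESS`, and the `utmpname`/`setutent`/`getutent` accessors. A rough sketch of enumerating active login sessions with just these declarations (note that `endutent` is not bound in this hunk, and error handling is omitted):

    extern crate libc;

    fn logged_in_users() -> Vec<String> {
        let mut users = Vec::new();
        unsafe {
            libc::setutent();                     // rewind the utmp database
            loop {
                let entry = libc::getutent();     // *mut utmp, NULL at the end
                if entry.is_null() {
                    break;
                }
                if (*entry).ut_type == libc::USER_PROCESS {
                    // ut_user is a fixed-size, not necessarily NUL-terminated
                    // buffer, so copy out up to the first NUL byte.
                    let raw = &(*entry).ut_user;
                    let len = raw.iter().position(|&c| c == 0).unwrap_or(raw.len());
                    let bytes: Vec<u8> = raw[..len].iter().map(|&c| c as u8).collect();
                    users.push(String::from_utf8_lossy(&bytes).into_owned());
                }
            }
        }
        users
    }
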
{ __unused5: ::c_ulong } + pub struct msqid_ds { + pub msg_perm: ::ipc_perm, + #[cfg(target_endian = "big")] + __glibc_reserved1: ::c_ulong, + pub msg_stime: ::time_t, + #[cfg(target_endian = "little")] + __glibc_reserved1: ::c_ulong, + #[cfg(target_endian = "big")] + __glibc_reserved2: ::c_ulong, + pub msg_rtime: ::time_t, + #[cfg(target_endian = "little")] + __glibc_reserved2: ::c_ulong, + #[cfg(target_endian = "big")] + __glibc_reserved3: ::c_ulong, + pub msg_ctime: ::time_t, + #[cfg(target_endian = "little")] + __glibc_reserved3: ::c_ulong, + __msg_cbytes: ::c_ulong, + pub msg_qnum: ::msgqnum_t, + pub msg_qbytes: ::msglen_t, + pub msg_lspid: ::pid_t, + pub msg_lrpid: ::pid_t, + __glibc_reserved4: ::c_ulong, + __glibc_reserved5: ::c_ulong, + } + pub struct statfs { pub f_type: ::c_long, pub f_bsize: ::c_long, @@ -203,14 +229,14 @@ s! { pub const BUFSIZ: ::c_uint = 8192; pub const TMP_MAX: ::c_uint = 238328; pub const FOPEN_MAX: ::c_uint = 16; +pub const POSIX_FADV_DONTNEED: ::c_int = 4; +pub const POSIX_FADV_NOREUSE: ::c_int = 5; pub const POSIX_MADV_DONTNEED: ::c_int = 4; pub const _SC_2_C_VERSION: ::c_int = 96; -pub const RUSAGE_THREAD: ::c_int = 1; pub const O_ACCMODE: ::c_int = 3; pub const O_DIRECT: ::c_int = 0x8000; pub const O_DIRECTORY: ::c_int = 0x10000; pub const O_NOFOLLOW: ::c_int = 0x20000; -pub const RUSAGE_CHILDREN: ::c_int = -1; pub const ST_RELATIME: ::c_ulong = 4096; pub const NI_MAXHOST: ::socklen_t = 1025; @@ -363,6 +389,7 @@ pub const SOCK_SEQPACKET: ::c_int = 5; pub const SOL_SOCKET: ::c_int = 0xffff; pub const SO_REUSEADDR: ::c_int = 4; +pub const SO_REUSEPORT: ::c_int = 0x200; pub const SO_TYPE: ::c_int = 4104; pub const SO_ERROR: ::c_int = 4103; pub const SO_DONTROUTE: ::c_int = 16; @@ -413,6 +440,11 @@ pub const SIG_SETMASK: ::c_int = 3; pub const SIG_BLOCK: ::c_int = 0x1; pub const SIG_UNBLOCK: ::c_int = 0x2; +pub const POLLRDNORM: ::c_short = 0x040; +pub const POLLWRNORM: ::c_short = 0x004; +pub const POLLRDBAND: ::c_short = 0x080; +pub const POLLWRBAND: ::c_short = 0x100; + pub const PTHREAD_STACK_MIN: ::size_t = 131072; pub const ADFS_SUPER_MAGIC: ::c_long = 0x0000adf5; @@ -545,6 +577,51 @@ pub const LINUX_REBOOT_CMD_KEXEC: ::c_int = 0x45584543; pub const SYS_gettid: ::c_long = 4222; // Valid for O32 +pub const MCL_CURRENT: ::c_int = 0x0001; +pub const MCL_FUTURE: ::c_int = 0x0002; + +pub const SIGSTKSZ: ::size_t = 8192; +pub const CBAUD: ::tcflag_t = 0o0010017; +pub const TAB1: ::c_int = 0x00000800; +pub const TAB2: ::c_int = 0x00001000; +pub const TAB3: ::c_int = 0x00001800; +pub const CR1: ::c_int = 0x00000200; +pub const CR2: ::c_int = 0x00000400; +pub const CR3: ::c_int = 0x00000600; +pub const FF1: ::c_int = 0x00008000; +pub const BS1: ::c_int = 0x00002000; +pub const VT1: ::c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: ::tcflag_t = 0x00000400; +pub const IXOFF: ::tcflag_t = 0x00001000; +pub const ONLCR: ::tcflag_t = 0x4; +pub const CSIZE: ::tcflag_t = 0x00000030; +pub const CS6: ::tcflag_t = 0x00000010; +pub const CS7: ::tcflag_t = 0x00000020; +pub const CS8: ::tcflag_t = 0x00000030; +pub const CSTOPB: ::tcflag_t = 0x00000040; +pub const CREAD: ::tcflag_t = 0x00000080; +pub const PARENB: ::tcflag_t = 0x00000100; +pub const PARODD: ::tcflag_t = 0x00000200; +pub const HUPCL: ::tcflag_t = 0x00000400; +pub const CLOCAL: ::tcflag_t = 0x00000800; +pub 
const ECHOKE: ::tcflag_t = 0x00000800; +pub const ECHOE: ::tcflag_t = 0x00000010; +pub const ECHOK: ::tcflag_t = 0x00000020; +pub const ECHONL: ::tcflag_t = 0x00000040; +pub const ECHOPRT: ::tcflag_t = 0x00000400; +pub const ECHOCTL: ::tcflag_t = 0x00000200; +pub const ISIG: ::tcflag_t = 0x00000001; +pub const ICANON: ::tcflag_t = 0x00000002; +pub const PENDIN: ::tcflag_t = 0x00004000; +pub const NOFLSH: ::tcflag_t = 0x00000080; + #[link(name = "util")] extern { pub fn sysctl(name: *mut ::c_int, diff --git a/src/liblibc/src/unix/notbsd/linux/mips64.rs b/src/liblibc/src/unix/notbsd/linux/mips64.rs new file mode 100644 index 0000000000..9612495314 --- /dev/null +++ b/src/liblibc/src/unix/notbsd/linux/mips64.rs @@ -0,0 +1,234 @@ +pub type blkcnt_t = i64; +pub type blksize_t = i64; +pub type c_char = i8; +pub type c_long = i64; +pub type c_ulong = u64; +pub type fsblkcnt_t = ::c_ulong; +pub type fsfilcnt_t = ::c_ulong; +pub type ino_t = u64; +pub type nlink_t = u64; +pub type off_t = i64; +pub type rlim_t = ::c_ulong; +pub type suseconds_t = i64; +pub type time_t = i64; +pub type wchar_t = i32; + +s! { + pub struct stat { + pub st_dev: ::c_ulong, + st_pad1: [::c_long; 2], + pub st_ino: ::ino_t, + pub st_mode: ::mode_t, + pub st_nlink: ::nlink_t, + pub st_uid: ::uid_t, + pub st_gid: ::gid_t, + pub st_rdev: ::c_ulong, + st_pad2: [::c_ulong; 1], + pub st_size: ::off_t, + st_pad3: ::c_long, + pub st_atime: ::time_t, + pub st_atime_nsec: ::c_long, + pub st_mtime: ::time_t, + pub st_mtime_nsec: ::c_long, + pub st_ctime: ::time_t, + pub st_ctime_nsec: ::c_long, + pub st_blksize: ::blksize_t, + st_pad4: ::c_long, + pub st_blocks: ::blkcnt_t, + st_pad5: [::c_long; 7], + } + + pub struct stat64 { + pub st_dev: ::c_ulong, + st_pad1: [::c_long; 2], + pub st_ino: ::ino64_t, + pub st_mode: ::mode_t, + pub st_nlink: ::nlink_t, + pub st_uid: ::uid_t, + pub st_gid: ::gid_t, + pub st_rdev: ::c_ulong, + st_pad2: [::c_long; 2], + pub st_size: ::off64_t, + pub st_atime: ::time_t, + pub st_atime_nsec: ::c_long, + pub st_mtime: ::time_t, + pub st_mtime_nsec: ::c_long, + pub st_ctime: ::time_t, + pub st_ctime_nsec: ::c_long, + pub st_blksize: ::blksize_t, + st_pad3: ::c_long, + pub st_blocks: ::blkcnt64_t, + st_pad5: [::c_long; 7], + } + + pub struct pthread_attr_t { + __size: [::c_ulong; 7] + } + + pub struct sigaction { + pub sa_flags: ::c_int, + pub sa_sigaction: ::sighandler_t, + pub sa_mask: sigset_t, + _restorer: *mut ::c_void, + } + + pub struct stack_t { + pub ss_sp: *mut ::c_void, + pub ss_size: ::size_t, + pub ss_flags: ::c_int, + } + + pub struct sigset_t { + __size: [::c_ulong; 16], + } + + pub struct siginfo_t { + pub si_signo: ::c_int, + pub si_code: ::c_int, + pub si_errno: ::c_int, + _pad: ::c_int, + _pad2: [::c_long; 14], + } + + pub struct ipc_perm { + pub __key: ::key_t, + pub uid: ::uid_t, + pub gid: ::gid_t, + pub cuid: ::uid_t, + pub cgid: ::gid_t, + pub mode: ::c_uint, + pub __seq: ::c_ushort, + __pad1: ::c_ushort, + __unused1: ::c_ulong, + __unused2: ::c_ulong + } + + pub struct shmid_ds { + pub shm_perm: ::ipc_perm, + pub shm_segsz: ::size_t, + pub shm_atime: ::time_t, + pub shm_dtime: ::time_t, + pub shm_ctime: ::time_t, + pub shm_cpid: ::pid_t, + pub shm_lpid: ::pid_t, + pub shm_nattch: ::shmatt_t, + __unused4: ::c_ulong, + __unused5: ::c_ulong + } + + pub struct msqid_ds { + pub msg_perm: ::ipc_perm, + pub msg_stime: ::time_t, + pub msg_rtime: ::time_t, + pub msg_ctime: ::time_t, + __msg_cbytes: ::c_ulong, + pub msg_qnum: ::msgqnum_t, + pub msg_qbytes: ::msglen_t, + pub msg_lspid: ::pid_t, 
+ pub msg_lrpid: ::pid_t, + __glibc_reserved4: ::c_ulong, + __glibc_reserved5: ::c_ulong, + } + + pub struct statfs { + pub f_type: ::c_long, + pub f_bsize: ::c_long, + pub f_frsize: ::c_long, + pub f_blocks: ::fsblkcnt_t, + pub f_bfree: ::fsblkcnt_t, + pub f_files: ::fsblkcnt_t, + pub f_ffree: ::fsblkcnt_t, + pub f_bavail: ::fsblkcnt_t, + pub f_fsid: ::fsid_t, + + pub f_namelen: ::c_long, + f_spare: [::c_long; 6], + } + + pub struct msghdr { + pub msg_name: *mut ::c_void, + pub msg_namelen: ::socklen_t, + pub msg_iov: *mut ::iovec, + pub msg_iovlen: ::size_t, + pub msg_control: *mut ::c_void, + pub msg_controllen: ::size_t, + pub msg_flags: ::c_int, + } + + pub struct termios { + pub c_iflag: ::tcflag_t, + pub c_oflag: ::tcflag_t, + pub c_cflag: ::tcflag_t, + pub c_lflag: ::tcflag_t, + pub c_line: ::cc_t, + pub c_cc: [::cc_t; ::NCCS], + } + + pub struct sysinfo { + pub uptime: ::c_long, + pub loads: [::c_ulong; 3], + pub totalram: ::c_ulong, + pub freeram: ::c_ulong, + pub sharedram: ::c_ulong, + pub bufferram: ::c_ulong, + pub totalswap: ::c_ulong, + pub freeswap: ::c_ulong, + pub procs: ::c_ushort, + pub pad: ::c_ushort, + pub totalhigh: ::c_ulong, + pub freehigh: ::c_ulong, + pub mem_unit: ::c_uint, + pub _f: [::c_char; 0], + } + + // FIXME this is actually a union + pub struct sem_t { + __size: [::c_char; 32], + __align: [::c_long; 0], + } +} + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; + +pub const EADDRINUSE: ::c_int = 125; +pub const EADDRNOTAVAIL: ::c_int = 126; +pub const ECONNABORTED: ::c_int = 130; +pub const ECONNREFUSED: ::c_int = 146; +pub const ECONNRESET: ::c_int = 131; +pub const EDEADLK: ::c_int = 45; +pub const ENOSYS: ::c_int = 89; +pub const ENOTCONN: ::c_int = 134; +pub const ETIMEDOUT: ::c_int = 145; +pub const FIOCLEX: ::c_ulong = 0x6601; +pub const FIONBIO: ::c_ulong = 0x667e; +pub const MAP_ANON: ::c_int = 0x800; +pub const O_ACCMODE: ::c_int = 3; +pub const O_APPEND: ::c_int = 8; +pub const O_CREAT: ::c_int = 256; +pub const O_EXCL: ::c_int = 1024; +pub const O_NONBLOCK: ::c_int = 128; +pub const POSIX_FADV_DONTNEED: ::c_int = 4; +pub const POSIX_FADV_NOREUSE: ::c_int = 5; +pub const PTHREAD_STACK_MIN: ::size_t = 131072; +pub const RLIM_INFINITY: ::rlim_t = 0xffffffffffffffff; +pub const SA_ONSTACK: ::c_int = 0x08000000; +pub const SA_SIGINFO: ::c_int = 0x00000008; +pub const SIGBUS: ::c_int = 10; +pub const SIGSTKSZ: ::size_t = 0x2000; +pub const SIG_SETMASK: ::c_int = 3; +pub const SOCK_DGRAM: ::c_int = 1; +pub const SOCK_STREAM: ::c_int = 2; +pub const SOL_SOCKET: ::c_int = 0xffff; +pub const SO_BROADCAST: ::c_int = 32; +pub const SO_ERROR: ::c_int = 4103; +pub const SO_RCVTIMEO: ::c_int = 4102; +pub const SO_REUSEADDR: ::c_int = 4; +pub const SO_SNDTIMEO: ::c_int = 4101; + +#[link(name = "util")] +extern { + pub fn ioctl(fd: ::c_int, request: ::c_ulong, ...) 
-> ::c_int; +} diff --git a/src/liblibc/src/unix/notbsd/linux/mod.rs b/src/liblibc/src/unix/notbsd/linux/mod.rs index 25007fbdf9..ea820ea5c6 100644 --- a/src/liblibc/src/unix/notbsd/linux/mod.rs +++ b/src/liblibc/src/unix/notbsd/linux/mod.rs @@ -11,9 +11,10 @@ pub type ino64_t = u64; pub type off64_t = i64; pub type blkcnt64_t = i64; pub type rlim64_t = u64; -pub type key_t = ::c_int; pub type shmatt_t = ::c_ulong; pub type mqd_t = ::c_int; +pub type msgqnum_t = ::c_ulong; +pub type msglen_t = ::c_ulong; pub type nfds_t = ::c_ulong; pub type nl_item = ::c_int; @@ -66,28 +67,30 @@ s! { pub struct pthread_mutex_t { #[cfg(any(target_arch = "mips", target_arch = "mipsel", - target_arch = "arm"))] + target_arch = "arm", target_arch = "powerpc"))] __align: [::c_long; 0], #[cfg(not(any(target_arch = "mips", target_arch = "mipsel", - target_arch = "arm")))] + target_arch = "arm", target_arch = "powerpc")))] __align: [::c_longlong; 0], size: [u8; __SIZEOF_PTHREAD_MUTEX_T], } pub struct pthread_rwlock_t { #[cfg(any(target_arch = "mips", target_arch = "mipsel", - target_arch = "arm"))] + target_arch = "arm", target_arch = "powerpc"))] __align: [::c_long; 0], #[cfg(not(any(target_arch = "mips", target_arch = "mipsel", - target_arch = "arm")))] + target_arch = "arm", target_arch = "powerpc")))] __align: [::c_longlong; 0], size: [u8; __SIZEOF_PTHREAD_RWLOCK_T], } pub struct pthread_mutexattr_t { - #[cfg(any(target_arch = "x86_64", target_arch = "powerpc64"))] + #[cfg(any(target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x"))] __align: [::c_int; 0], - #[cfg(not(any(target_arch = "x86_64", target_arch = "powerpc64")))] + #[cfg(not(any(target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x")))] __align: [::c_long; 0], size: [u8; __SIZEOF_PTHREAD_MUTEXATTR_T], } @@ -115,6 +118,18 @@ s! { pub pw_shell: *mut ::c_char, } + pub struct spwd { + pub sp_namp: *mut ::c_char, + pub sp_pwdp: *mut ::c_char, + pub sp_lstchg: ::c_long, + pub sp_min: ::c_long, + pub sp_max: ::c_long, + pub sp_warn: ::c_long, + pub sp_inact: ::c_long, + pub sp_expire: ::c_long, + pub sp_flag: ::c_ulong, + } + pub struct statvfs { pub f_bsize: ::c_ulong, pub f_frsize: ::c_ulong, @@ -124,9 +139,12 @@ s! { pub f_files: ::fsfilcnt_t, pub f_ffree: ::fsfilcnt_t, pub f_favail: ::fsfilcnt_t, + #[cfg(target_endian = "little")] pub f_fsid: ::c_ulong, #[cfg(target_pointer_width = "32")] - pub __f_unused: ::c_int, + __f_unused: ::c_int, + #[cfg(target_endian = "big")] + pub f_fsid: ::c_ulong, pub f_flag: ::c_ulong, pub f_namemax: ::c_ulong, __f_spare: [::c_int; 6], @@ -188,6 +206,17 @@ s! 
{ pub if_name: *mut ::c_char, } + // System V IPC + pub struct msginfo { + pub msgpool: ::c_int, + pub msgmap: ::c_int, + pub msgmax: ::c_int, + pub msgmnb: ::c_int, + pub msgmni: ::c_int, + pub msgssz: ::c_int, + pub msgtql: ::c_int, + pub msgseg: ::c_ushort, + } } pub const ABDAY_1: ::nl_item = 0x20000; @@ -219,6 +248,8 @@ pub const ABMON_10: ::nl_item = 0x20017; pub const ABMON_11: ::nl_item = 0x20018; pub const ABMON_12: ::nl_item = 0x20019; +pub const CLONE_NEWCGROUP: ::c_int = 0x02000000; + pub const MON_1: ::nl_item = 0x2001A; pub const MON_2: ::nl_item = 0x2001B; pub const MON_3: ::nl_item = 0x2001C; @@ -250,6 +281,9 @@ pub const CODESET: ::nl_item = 14; pub const CRNCYSTR: ::nl_item = 0x4000F; +pub const RUSAGE_THREAD: ::c_int = 1; +pub const RUSAGE_CHILDREN: ::c_int = -1; + pub const RADIXCHAR: ::nl_item = 0x10000; pub const THOUSEP: ::nl_item = 0x10001; @@ -428,6 +462,9 @@ pub const SCHED_RR: ::c_int = 2; pub const SCHED_BATCH: ::c_int = 3; pub const SCHED_IDLE: ::c_int = 5; +// System V IPC +pub const IPC_PRIVATE: ::key_t = 0; + pub const IPC_CREAT: ::c_int = 0o1000; pub const IPC_EXCL: ::c_int = 0o2000; pub const IPC_NOWAIT: ::c_int = 0o4000; @@ -436,6 +473,12 @@ pub const IPC_RMID: ::c_int = 0; pub const IPC_SET: ::c_int = 1; pub const IPC_STAT: ::c_int = 2; pub const IPC_INFO: ::c_int = 3; +pub const MSG_STAT: ::c_int = 11; +pub const MSG_INFO: ::c_int = 12; + +pub const MSG_NOERROR: ::c_int = 0o10000; +pub const MSG_EXCEPT: ::c_int = 0o20000; +pub const MSG_COPY: ::c_int = 0o40000; pub const SHM_R: ::c_int = 0o400; pub const SHM_W: ::c_int = 0o200; @@ -463,8 +506,6 @@ pub const EFD_SEMAPHORE: ::c_int = 0x1; pub const NCCS: usize = 32; -pub const AF_NETLINK: ::c_int = 16; - pub const LOG_NFACILITIES: ::c_int = 24; pub const SEM_FAILED: *mut ::sem_t = 0 as *mut sem_t; @@ -477,6 +518,12 @@ pub const RB_POWER_OFF: ::c_int = 0x4321fedcu32 as i32; pub const RB_SW_SUSPEND: ::c_int = 0xd000fce2u32 as i32; pub const RB_KEXEC: ::c_int = 0x45584543u32 as i32; +pub const SYNC_FILE_RANGE_WAIT_BEFORE: ::c_uint = 1; +pub const SYNC_FILE_RANGE_WRITE: ::c_uint = 2; +pub const SYNC_FILE_RANGE_WAIT_AFTER: ::c_uint = 4; + +pub const EAI_SYSTEM: ::c_int = -11; + f! { pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { for slot in cpuset.bits.iter_mut() { @@ -491,6 +538,13 @@ f! { () } + pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () { + let size_in_bits = 8 * mem::size_of_val(&cpuset.bits[0]); // 32, 64 etc + let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); + cpuset.bits[idx] &= !(1 << offset); + () + } + pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool { let size_in_bits = 8 * mem::size_of_val(&cpuset.bits[0]); let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); @@ -503,8 +557,19 @@ f! 
{ } extern { + pub fn lutimes(file: *const ::c_char, times: *const ::timeval) -> ::c_int; + + pub fn setpwent(); + pub fn getpwent() -> *mut passwd; + pub fn setspent(); + pub fn endspent(); + pub fn getspent() -> *mut spwd; + pub fn getspnam(__name: *const ::c_char) -> *mut spwd; + pub fn shm_open(name: *const c_char, oflag: ::c_int, mode: mode_t) -> ::c_int; + + // System V IPC pub fn shmget(key: ::key_t, size: ::size_t, shmflg: ::c_int) -> ::c_int; pub fn shmat(shmid: ::c_int, shmaddr: *const ::c_void, @@ -513,6 +578,14 @@ extern { pub fn shmctl(shmid: ::c_int, cmd: ::c_int, buf: *mut ::shmid_ds) -> ::c_int; + pub fn ftok(pathname: *const ::c_char, proj_id: ::c_int) -> ::key_t; + pub fn msgctl(msqid: ::c_int, cmd: ::c_int, buf: *mut msqid_ds) -> ::c_int; + pub fn msgget(key: ::key_t, msgflg: ::c_int) -> ::c_int; + pub fn msgrcv(msqid: ::c_int, msgp: *mut ::c_void, msgsz: ::size_t, + msgtyp: ::c_long, msgflg: ::c_int) -> ::ssize_t; + pub fn msgsnd(msqid: ::c_int, msgp: *const ::c_void, msgsz: ::size_t, + msgflg: ::c_int) -> ::c_int; + pub fn mprotect(addr: *mut ::c_void, len: ::size_t, prot: ::c_int) -> ::c_int; pub fn __errno_location() -> *mut ::c_int; @@ -588,21 +661,13 @@ extern { pub fn mq_setattr(mqd: ::mqd_t, newattr: *const ::mq_attr, oldattr: *mut ::mq_attr) -> ::c_int; - pub fn sched_getaffinity(pid: ::pid_t, - cpusetsize: ::size_t, - cpuset: *mut cpu_set_t) -> ::c_int; - pub fn sched_setaffinity(pid: ::pid_t, - cpusetsize: ::size_t, - cpuset: *const cpu_set_t) -> ::c_int; pub fn epoll_pwait(epfd: ::c_int, events: *mut ::epoll_event, maxevents: ::c_int, timeout: ::c_int, sigmask: *const ::sigset_t) -> ::c_int; pub fn dup3(oldfd: ::c_int, newfd: ::c_int, flags: ::c_int) -> ::c_int; - pub fn unshare(flags: ::c_int) -> ::c_int; pub fn sethostname(name: *const ::c_char, len: ::size_t) -> ::c_int; - pub fn setns(fd: ::c_int, nstype: ::c_int) -> ::c_int; pub fn mkostemp(template: *mut ::c_char, flags: ::c_int) -> ::c_int; pub fn mkostemps(template: *mut ::c_char, suffixlen: ::c_int, @@ -655,6 +720,10 @@ extern { mode: ::mode_t) -> ::c_int; pub fn if_nameindex() -> *mut if_nameindex; pub fn if_freenameindex(ptr: *mut if_nameindex); + pub fn sync_file_range(fd: ::c_int, offset: ::off64_t, + nbytes: ::off64_t, flags: ::c_uint) -> ::c_int; + pub fn getifaddrs(ifap: *mut *mut ::ifaddrs) -> ::c_int; + pub fn freeifaddrs(ifa: *mut ::ifaddrs); } cfg_if! { @@ -665,6 +734,12 @@ cfg_if! { } else if #[cfg(any(target_arch = "mips", target_arch = "mipsel"))] { mod mips; pub use self::mips::*; + } else if #[cfg(any(target_arch = "s390x"))] { + mod s390x; + pub use self::s390x::*; + } else if #[cfg(any(target_arch = "mips64"))] { + mod mips64; + pub use self::mips64::*; } else { mod other; pub use self::other::*; diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b32/arm.rs b/src/liblibc/src/unix/notbsd/linux/musl/b32/arm.rs index 5d648618e3..998580d3e2 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/b32/arm.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/b32/arm.rs @@ -68,6 +68,23 @@ s! { __pad2: ::c_ulong, } + pub struct msqid_ds { + pub msg_perm: ::ipc_perm, + pub msg_stime: ::time_t, + __unused1: ::c_int, + pub msg_rtime: ::time_t, + __unused2: ::c_int, + pub msg_ctime: ::time_t, + __unused3: ::c_int, + __msg_cbytes: ::c_ulong, + pub msg_qnum: ::msgqnum_t, + pub msg_qbytes: ::msglen_t, + pub msg_lspid: ::pid_t, + pub msg_lrpid: ::pid_t, + __pad1: ::c_ulong, + __pad2: ::c_ulong, + } + pub struct statfs { pub f_type: ::c_ulong, pub f_bsize: ::c_ulong, @@ -82,6 +99,14 @@ s! 
{ pub f_flags: ::c_ulong, pub f_spare: [::c_ulong; 4], } + + pub struct siginfo_t { + pub si_signo: ::c_int, + pub si_errno: ::c_int, + pub si_code: ::c_int, + pub _pad: [::c_int; 29], + _align: [usize; 0], + } } pub const O_DIRECT: ::c_int = 0x4000; @@ -306,3 +331,6 @@ pub const TIOCCONS: ::c_int = 0x541D; pub const SYS_gettid: ::c_long = 224; pub const SYS_perf_event_open: ::c_long = 364; + +pub const POLLWRNORM: ::c_short = 0x100; +pub const POLLWRBAND: ::c_short = 0x200; diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b32/asmjs.rs b/src/liblibc/src/unix/notbsd/linux/musl/b32/asmjs.rs index 93e4ab6b6d..91a96c185a 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/b32/asmjs.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/b32/asmjs.rs @@ -68,6 +68,23 @@ s! { __pad2: ::c_ulong, } + pub struct msqid_ds { + pub msg_perm: ::ipc_perm, + pub msg_stime: ::time_t, + __unused1: ::c_int, + pub msg_rtime: ::time_t, + __unused2: ::c_int, + pub msg_ctime: ::time_t, + __unused3: ::c_int, + __msg_cbytes: ::c_ulong, + pub msg_qnum: ::msgqnum_t, + pub msg_qbytes: ::msglen_t, + pub msg_lspid: ::pid_t, + pub msg_lrpid: ::pid_t, + __pad1: ::c_ulong, + __pad2: ::c_ulong, + } + pub struct statfs { pub f_type: ::c_ulong, pub f_bsize: ::c_ulong, @@ -82,6 +99,14 @@ s! { pub f_flags: ::c_ulong, pub f_spare: [::c_ulong; 4], } + + pub struct siginfo_t { + pub si_signo: ::c_int, + pub si_errno: ::c_int, + pub si_code: ::c_int, + pub _pad: [::c_int; 29], + _align: [usize; 0], + } } pub const O_DIRECT: ::c_int = 0x4000; @@ -305,3 +330,6 @@ pub const FIONREAD: ::c_int = 0x541B; pub const TIOCCONS: ::c_int = 0x541D; pub const SYS_gettid: ::c_long = 224; // Valid for arm (32-bit) and x86 (32-bit) + +pub const POLLWRNORM: ::c_short = 0x100; +pub const POLLWRBAND: ::c_short = 0x200; diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b32/mips.rs b/src/liblibc/src/unix/notbsd/linux/musl/b32/mips.rs index f52d195322..9ebfe4a68f 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/b32/mips.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/b32/mips.rs @@ -67,6 +67,32 @@ s! { __pad2: ::c_ulong, } + pub struct msqid_ds { + pub msg_perm: ::ipc_perm, + #[cfg(target_endian = "big")] + __unused1: ::c_int, + pub msg_stime: ::time_t, + #[cfg(target_endian = "little")] + __unused1: ::c_int, + #[cfg(target_endian = "big")] + __unused2: ::c_int, + pub msg_rtime: ::time_t, + #[cfg(target_endian = "little")] + __unused2: ::c_int, + #[cfg(target_endian = "big")] + __unused3: ::c_int, + pub msg_ctime: ::time_t, + #[cfg(target_endian = "little")] + __unused3: ::c_int, + __msg_cbytes: ::c_ulong, + pub msg_qnum: ::msgqnum_t, + pub msg_qbytes: ::msglen_t, + pub msg_lspid: ::pid_t, + pub msg_lrpid: ::pid_t, + __pad1: ::c_ulong, + __pad2: ::c_ulong, + } + pub struct statfs { pub f_type: ::c_ulong, pub f_bsize: ::c_ulong, @@ -81,6 +107,14 @@ s! 
{ pub f_flags: ::c_ulong, pub f_spare: [::c_ulong; 5], } + + pub struct siginfo_t { + pub si_signo: ::c_int, + pub si_code: ::c_int, + pub si_errno: ::c_int, + pub _pad: [::c_int; 29], + _align: [usize; 0], + } } pub const O_DIRECT: ::c_int = 0o100000; @@ -299,9 +333,12 @@ pub const TIOCSWINSZ: ::c_int = 0x80087467; pub const TIOCMGET: ::c_int = 0x741D; pub const TIOCMBIS: ::c_int = 0x741B; pub const TIOCMBIC: ::c_int = 0x741C; -pub const TIOCMSET: ::c_int = 0x741D; +pub const TIOCMSET: ::c_int = 0x741A; pub const FIONREAD: ::c_int = 0x467F; pub const TIOCCONS: ::c_int = 0x80047478; pub const SYS_gettid: ::c_long = 4222; // Valid for O32 pub const SYS_perf_event_open: ::c_long = 4333; // Valid for O32 + +pub const POLLWRNORM: ::c_short = 0x4; +pub const POLLWRBAND: ::c_short = 0x100; diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b32/mod.rs b/src/liblibc/src/unix/notbsd/linux/musl/b32/mod.rs index 6ae90bd04e..899bc69310 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/b32/mod.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/b32/mod.rs @@ -39,7 +39,9 @@ cfg_if! { } else if #[cfg(any(target_arch = "arm"))] { mod arm; pub use self::arm::*; - } else if #[cfg(any(target_arch = "asmjs"))] { + } else if #[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))] { + // For the time being asmjs and wasm32 are the same, and both + // backed by identical emscripten runtimes mod asmjs; pub use self::asmjs::*; } else { diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b32/x86.rs b/src/liblibc/src/unix/notbsd/linux/musl/b32/x86.rs index 9d057de7b9..194b8fd8bd 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/b32/x86.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/b32/x86.rs @@ -68,6 +68,23 @@ s! { __pad2: ::c_ulong, } + pub struct msqid_ds { + pub msg_perm: ::ipc_perm, + pub msg_stime: ::time_t, + __unused1: ::c_int, + pub msg_rtime: ::time_t, + __unused2: ::c_int, + pub msg_ctime: ::time_t, + __unused3: ::c_int, + __msg_cbytes: ::c_ulong, + pub msg_qnum: ::msgqnum_t, + pub msg_qbytes: ::msglen_t, + pub msg_lspid: ::pid_t, + pub msg_lrpid: ::pid_t, + __pad1: ::c_ulong, + __pad2: ::c_ulong, + } + pub struct statfs { pub f_type: ::c_ulong, pub f_bsize: ::c_ulong, @@ -95,6 +112,14 @@ s! { pub uc_sigmask: ::sigset_t, __private: [u8; 112], } + + pub struct siginfo_t { + pub si_signo: ::c_int, + pub si_errno: ::c_int, + pub si_code: ::c_int, + pub _pad: [::c_int; 29], + _align: [usize; 0], + } } pub const O_DIRECT: ::c_int = 0x4000; @@ -320,3 +345,6 @@ pub const TIOCCONS: ::c_int = 0x541D; pub const SYS_gettid: ::c_long = 224; pub const SYS_perf_event_open: ::c_long = 336; + +pub const POLLWRNORM: ::c_short = 0x100; +pub const POLLWRBAND: ::c_short = 0x200; diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b64/aarch64.rs b/src/liblibc/src/unix/notbsd/linux/musl/b64/aarch64.rs index 51db30f2f1..23f7dd35e5 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/b64/aarch64.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/b64/aarch64.rs @@ -1 +1,3 @@ +pub type c_char = u8; + pub const SYS_perf_event_open: ::c_long = 241; diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b64/mod.rs b/src/liblibc/src/unix/notbsd/linux/musl/b64/mod.rs index 0501c553c3..fdaf52e166 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/b64/mod.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/b64/mod.rs @@ -1,4 +1,3 @@ -pub type c_char = i8; pub type wchar_t = i32; pub type c_long = i64; pub type c_ulong = u64; @@ -74,6 +73,20 @@ s! 
{ __pad2: ::c_ulong, } + pub struct msqid_ds { + pub msg_perm: ::ipc_perm, + pub msg_stime: ::time_t, + pub msg_rtime: ::time_t, + pub msg_ctime: ::time_t, + __msg_cbytes: ::c_ulong, + pub msg_qnum: ::msgqnum_t, + pub msg_qbytes: ::msglen_t, + pub msg_lspid: ::pid_t, + pub msg_lrpid: ::pid_t, + __pad1: ::c_ulong, + __pad2: ::c_ulong, + } + pub struct statfs { pub f_type: ::c_ulong, pub f_bsize: ::c_ulong, @@ -104,6 +117,14 @@ s! { pub struct sem_t { __val: [::c_int; 8], } + + pub struct siginfo_t { + pub si_signo: ::c_int, + pub si_errno: ::c_int, + pub si_code: ::c_int, + pub _pad: [::c_int; 29], + _align: [usize; 0], + } } pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; @@ -330,6 +351,9 @@ pub const TIOCMSET: ::c_int = 0x5418; pub const FIONREAD: ::c_int = 0x541B; pub const TIOCCONS: ::c_int = 0x541D; +pub const POLLWRNORM: ::c_short = 0x100; +pub const POLLWRBAND: ::c_short = 0x200; + cfg_if! { if #[cfg(target_arch = "aarch64")] { mod aarch64; diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b64/powerpc64.rs b/src/liblibc/src/unix/notbsd/linux/musl/b64/powerpc64.rs index bb81863654..4b8ca10aab 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/b64/powerpc64.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/b64/powerpc64.rs @@ -1 +1,3 @@ +pub type c_char = u8; + pub const SYS_perf_event_open: ::c_long = 319; diff --git a/src/liblibc/src/unix/notbsd/linux/musl/b64/x86_64.rs b/src/liblibc/src/unix/notbsd/linux/musl/b64/x86_64.rs index 02324dae30..2cfd903ca8 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/b64/x86_64.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/b64/x86_64.rs @@ -1,3 +1,5 @@ +pub type c_char = i8; + s! { pub struct mcontext_t { __private: [u64; 32], diff --git a/src/liblibc/src/unix/notbsd/linux/musl/mod.rs b/src/liblibc/src/unix/notbsd/linux/musl/mod.rs index 0056721883..69a85b86ea 100644 --- a/src/liblibc/src/unix/notbsd/linux/musl/mod.rs +++ b/src/liblibc/src/unix/notbsd/linux/musl/mod.rs @@ -18,14 +18,6 @@ s! { _restorer: *mut ::c_void, } - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_errno: ::c_int, - pub si_code: ::c_int, - pub _pad: [::c_int; 29], - _align: [usize; 0], - } - pub struct ipc_perm { pub __ipc_perm_key: ::key_t, pub uid: ::uid_t, @@ -78,12 +70,14 @@ s! 
{ pub const BUFSIZ: ::c_uint = 1024; pub const TMP_MAX: ::c_uint = 10000; pub const FOPEN_MAX: ::c_uint = 1000; -pub const POSIX_MADV_DONTNEED: ::c_int = 0; pub const O_ACCMODE: ::c_int = 0o10000003; pub const O_NDELAY: ::c_int = O_NONBLOCK; -pub const RUSAGE_CHILDREN: ::c_int = 1; pub const NI_MAXHOST: ::socklen_t = 255; pub const PTHREAD_STACK_MIN: ::size_t = 2048; +pub const POSIX_FADV_DONTNEED: ::c_int = 4; +pub const POSIX_FADV_NOREUSE: ::c_int = 5; + +pub const POSIX_MADV_DONTNEED: ::c_int = 4; pub const RLIM_INFINITY: ::rlim_t = !0; pub const RLIMIT_RTTIME: ::c_int = 15; @@ -138,11 +132,24 @@ pub const PTRACE_INTERRUPT: ::c_int = 0x4207; pub const PTRACE_LISTEN: ::c_int = 0x4208; pub const PTRACE_PEEKSIGINFO: ::c_int = 0x4209; +pub const PTRACE_O_EXITKILL: ::c_int = 1048576; +pub const PTRACE_O_TRACECLONE: ::c_int = 8; +pub const PTRACE_O_TRACEEXEC: ::c_int = 16; +pub const PTRACE_O_TRACEEXIT: ::c_int = 64; +pub const PTRACE_O_TRACEFORK: ::c_int = 2; +pub const PTRACE_O_TRACESYSGOOD: ::c_int = 1; +pub const PTRACE_O_TRACEVFORK: ::c_int = 4; +pub const PTRACE_O_TRACEVFORKDONE: ::c_int = 32; +pub const PTRACE_O_SUSPEND_SECCOMP: ::c_int = 2097152; + pub const MADV_DODUMP: ::c_int = 17; pub const MADV_DONTDUMP: ::c_int = 16; pub const EPOLLWAKEUP: ::c_int = 0x20000000; +pub const POLLRDNORM: ::c_short = 0x040; +pub const POLLRDBAND: ::c_short = 0x080; + pub const MADV_HUGEPAGE: ::c_int = 14; pub const MADV_NOHUGEPAGE: ::c_int = 15; @@ -171,6 +178,51 @@ pub const RTLD_NOLOAD: ::c_int = 0x4; pub const CLOCK_SGI_CYCLE: ::clockid_t = 10; pub const CLOCK_TAI: ::clockid_t = 11; +pub const MCL_CURRENT: ::c_int = 0x0001; +pub const MCL_FUTURE: ::c_int = 0x0002; + +pub const SIGSTKSZ: ::size_t = 8192; +pub const CBAUD: ::tcflag_t = 0o0010017; +pub const TAB1: ::c_int = 0x00000800; +pub const TAB2: ::c_int = 0x00001000; +pub const TAB3: ::c_int = 0x00001800; +pub const CR1: ::c_int = 0x00000200; +pub const CR2: ::c_int = 0x00000400; +pub const CR3: ::c_int = 0x00000600; +pub const FF1: ::c_int = 0x00008000; +pub const BS1: ::c_int = 0x00002000; +pub const VT1: ::c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: ::tcflag_t = 0x00000400; +pub const IXOFF: ::tcflag_t = 0x00001000; +pub const ONLCR: ::tcflag_t = 0x4; +pub const CSIZE: ::tcflag_t = 0x00000030; +pub const CS6: ::tcflag_t = 0x00000010; +pub const CS7: ::tcflag_t = 0x00000020; +pub const CS8: ::tcflag_t = 0x00000030; +pub const CSTOPB: ::tcflag_t = 0x00000040; +pub const CREAD: ::tcflag_t = 0x00000080; +pub const PARENB: ::tcflag_t = 0x00000100; +pub const PARODD: ::tcflag_t = 0x00000200; +pub const HUPCL: ::tcflag_t = 0x00000400; +pub const CLOCAL: ::tcflag_t = 0x00000800; +pub const ECHOKE: ::tcflag_t = 0x00000800; +pub const ECHOE: ::tcflag_t = 0x00000010; +pub const ECHOK: ::tcflag_t = 0x00000020; +pub const ECHONL: ::tcflag_t = 0x00000040; +pub const ECHOPRT: ::tcflag_t = 0x00000400; +pub const ECHOCTL: ::tcflag_t = 0x00000200; +pub const ISIG: ::tcflag_t = 0x00000001; +pub const ICANON: ::tcflag_t = 0x00000002; +pub const PENDIN: ::tcflag_t = 0x00004000; +pub const NOFLSH: ::tcflag_t = 0x00000080; + extern { pub fn ioctl(fd: ::c_int, request: ::c_int, ...) -> ::c_int; pub fn ptrace(request: ::c_int, ...) -> ::c_long; @@ -185,7 +237,8 @@ cfg_if! 
{ } else if #[cfg(any(target_arch = "x86", target_arch = "mips", target_arch = "arm", - target_arch = "asmjs"))] { + target_arch = "asmjs", + target_arch = "wasm32"))] { mod b32; pub use self::b32::*; } else { } diff --git a/src/liblibc/src/unix/notbsd/linux/other/b32/arm.rs b/src/liblibc/src/unix/notbsd/linux/other/b32/arm.rs index 7de4b8b990..2c6fbcd6e3 100644 --- a/src/liblibc/src/unix/notbsd/linux/other/b32/arm.rs +++ b/src/liblibc/src/unix/notbsd/linux/other/b32/arm.rs @@ -1,6 +1,77 @@ pub type c_char = u8; pub type wchar_t = u32; +s! { + pub struct ipc_perm { + pub __key: ::key_t, + pub uid: ::uid_t, + pub gid: ::gid_t, + pub cuid: ::uid_t, + pub cgid: ::gid_t, + pub mode: ::c_ushort, + __pad1: ::c_ushort, + pub __seq: ::c_ushort, + __pad2: ::c_ushort, + __unused1: ::c_ulong, + __unused2: ::c_ulong + } + + pub struct stat64 { + pub st_dev: ::dev_t, + __pad1: ::c_uint, + __st_ino: ::ino_t, + pub st_mode: ::mode_t, + pub st_nlink: ::nlink_t, + pub st_uid: ::uid_t, + pub st_gid: ::gid_t, + pub st_rdev: ::dev_t, + __pad2: ::c_uint, + pub st_size: ::off64_t, + pub st_blksize: ::blksize_t, + pub st_blocks: ::blkcnt64_t, + pub st_atime: ::time_t, + pub st_atime_nsec: ::c_long, + pub st_mtime: ::time_t, + pub st_mtime_nsec: ::c_long, + pub st_ctime: ::time_t, + pub st_ctime_nsec: ::c_long, + pub st_ino: ::ino64_t, + } + + pub struct shmid_ds { + pub shm_perm: ::ipc_perm, + pub shm_segsz: ::size_t, + pub shm_atime: ::time_t, + __unused1: ::c_ulong, + pub shm_dtime: ::time_t, + __unused2: ::c_ulong, + pub shm_ctime: ::time_t, + __unused3: ::c_ulong, + pub shm_cpid: ::pid_t, + pub shm_lpid: ::pid_t, + pub shm_nattch: ::shmatt_t, + __unused4: ::c_ulong, + __unused5: ::c_ulong + } + + pub struct msqid_ds { + pub msg_perm: ::ipc_perm, + pub msg_stime: ::time_t, + __glibc_reserved1: ::c_ulong, + pub msg_rtime: ::time_t, + __glibc_reserved2: ::c_ulong, + pub msg_ctime: ::time_t, + __glibc_reserved3: ::c_ulong, + __msg_cbytes: ::c_ulong, + pub msg_qnum: ::msgqnum_t, + pub msg_qbytes: ::msglen_t, + pub msg_lspid: ::pid_t, + pub msg_lrpid: ::pid_t, + __glibc_reserved4: ::c_ulong, + __glibc_reserved5: ::c_ulong, + } +} + pub const O_DIRECT: ::c_int = 0x10000; pub const O_DIRECTORY: ::c_int = 0x4000; pub const O_NOFOLLOW: ::c_int = 0x8000; @@ -21,3 +92,77 @@ pub const FIONBIO: ::c_ulong = 0x5421; pub const SYS_gettid: ::c_long = 224; pub const SYS_perf_event_open: ::c_long = 364; + +pub const PTRACE_GETFPXREGS: ::c_uint = 18; +pub const PTRACE_SETFPXREGS: ::c_uint = 19; + +pub const MCL_CURRENT: ::c_int = 0x0001; +pub const MCL_FUTURE: ::c_int = 0x0002; + +pub const SIGSTKSZ: ::size_t = 8192; +pub const CBAUD: ::tcflag_t = 0o0010017; +pub const TAB1: ::c_int = 0x00000800; +pub const TAB2: ::c_int = 0x00001000; +pub const TAB3: ::c_int = 0x00001800; +pub const CR1: ::c_int = 0x00000200; +pub const CR2: ::c_int = 0x00000400; +pub const CR3: ::c_int = 0x00000600; +pub const FF1: ::c_int = 0x00008000; +pub const BS1: ::c_int = 0x00002000; +pub const VT1: ::c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: ::tcflag_t = 0x00000400; +pub const IXOFF: ::tcflag_t = 0x00001000; +pub const ONLCR: ::tcflag_t = 0x4; +pub const CSIZE: ::tcflag_t = 0x00000030; +pub const CS6: ::tcflag_t = 0x00000010; +pub const CS7: ::tcflag_t = 0x00000020; +pub const CS8: ::tcflag_t = 0x00000030; +pub const CSTOPB: ::tcflag_t = 
0x00000040; +pub const CREAD: ::tcflag_t = 0x00000080; +pub const PARENB: ::tcflag_t = 0x00000100; +pub const PARODD: ::tcflag_t = 0x00000200; +pub const HUPCL: ::tcflag_t = 0x00000400; +pub const CLOCAL: ::tcflag_t = 0x00000800; +pub const ECHOKE: ::tcflag_t = 0x00000800; +pub const ECHOE: ::tcflag_t = 0x00000010; +pub const ECHOK: ::tcflag_t = 0x00000020; +pub const ECHONL: ::tcflag_t = 0x00000040; +pub const ECHOPRT: ::tcflag_t = 0x00000400; +pub const ECHOCTL: ::tcflag_t = 0x00000200; +pub const ISIG: ::tcflag_t = 0x00000001; +pub const ICANON: ::tcflag_t = 0x00000002; +pub const PENDIN: ::tcflag_t = 0x00004000; +pub const NOFLSH: ::tcflag_t = 0x00000080; + +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: ::tcflag_t = 0x00008000; +pub const TOSTOP: ::tcflag_t = 0x00000100; +pub const FLUSHO: ::tcflag_t = 0x00001000; +pub const EXTPROC: ::tcflag_t = 0x00010000; +pub const TCGETS: ::c_ulong = 0x5401; +pub const TCSETS: ::c_ulong = 0x5402; +pub const TCSETSW: ::c_ulong = 0x5403; +pub const TCSETSF: ::c_ulong = 0x5404; +pub const TCGETA: ::c_ulong = 0x5405; +pub const TCSETA: ::c_ulong = 0x5406; +pub const TCSETAW: ::c_ulong = 0x5407; +pub const TCSETAF: ::c_ulong = 0x5408; +pub const TCSBRK: ::c_ulong = 0x5409; +pub const TCXONC: ::c_ulong = 0x540A; +pub const TCFLSH: ::c_ulong = 0x540B; +pub const TIOCINQ: ::c_ulong = 0x541B; +pub const TIOCGPGRP: ::c_ulong = 0x540F; +pub const TIOCSPGRP: ::c_ulong = 0x5410; +pub const TIOCOUTQ: ::c_ulong = 0x5411; +pub const TIOCGWINSZ: ::c_ulong = 0x5413; +pub const TIOCSWINSZ: ::c_ulong = 0x5414; +pub const FIONREAD: ::c_ulong = 0x541B; diff --git a/src/liblibc/src/unix/notbsd/linux/other/b32/mod.rs b/src/liblibc/src/unix/notbsd/linux/other/b32/mod.rs index ba48d638b1..48c3502ada 100644 --- a/src/liblibc/src/unix/notbsd/linux/other/b32/mod.rs +++ b/src/liblibc/src/unix/notbsd/linux/other/b32/mod.rs @@ -37,28 +37,6 @@ s! { __unused5: ::c_long, } - pub struct stat64 { - pub st_dev: ::dev_t, - __pad1: ::c_uint, - __st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __pad2: ::c_uint, - pub st_size: ::off64_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt64_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_ino: ::ino64_t, - } - pub struct pthread_attr_t { __size: [u32; 9] } @@ -92,8 +70,6 @@ pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; pub const PTRACE_GETFPREGS: ::c_uint = 14; pub const PTRACE_SETFPREGS: ::c_uint = 15; -pub const PTRACE_GETFPXREGS: ::c_uint = 18; -pub const PTRACE_SETFPXREGS: ::c_uint = 19; pub const PTRACE_GETREGS: ::c_uint = 12; pub const PTRACE_SETREGS: ::c_uint = 13; diff --git a/src/liblibc/src/unix/notbsd/linux/other/b32/powerpc.rs b/src/liblibc/src/unix/notbsd/linux/other/b32/powerpc.rs index 2334c1f8cd..3125e9f337 100644 --- a/src/liblibc/src/unix/notbsd/linux/other/b32/powerpc.rs +++ b/src/liblibc/src/unix/notbsd/linux/other/b32/powerpc.rs @@ -1,6 +1,77 @@ pub type c_char = u8; pub type wchar_t = i32; +s! 
{ + pub struct ipc_perm { + __key: ::key_t, + pub uid: ::uid_t, + pub gid: ::gid_t, + pub cuid: ::uid_t, + pub cgid: ::gid_t, + pub mode: ::mode_t, + __seq: ::uint32_t, + __pad1: ::uint32_t, + __glibc_reserved1: ::uint64_t, + __glibc_reserved2: ::uint64_t, + } + + pub struct stat64 { + pub st_dev: ::dev_t, + pub st_ino: ::ino64_t, + pub st_mode: ::mode_t, + pub st_nlink: ::nlink_t, + pub st_uid: ::uid_t, + pub st_gid: ::gid_t, + pub st_rdev: ::dev_t, + __pad2: ::c_ushort, + pub st_size: ::off64_t, + pub st_blksize: ::blksize_t, + pub st_blocks: ::blkcnt64_t, + pub st_atime: ::time_t, + pub st_atime_nsec: ::c_long, + pub st_mtime: ::time_t, + pub st_mtime_nsec: ::c_long, + pub st_ctime: ::time_t, + pub st_ctime_nsec: ::c_long, + __glibc_reserved4: ::c_ulong, + __glibc_reserved5: ::c_ulong, + } + + pub struct shmid_ds { + pub shm_perm: ::ipc_perm, + __glibc_reserved1: ::c_uint, + pub shm_atime: ::time_t, + __glibc_reserved2: ::c_uint, + pub shm_dtime: ::time_t, + __glibc_reserved3: ::c_uint, + pub shm_ctime: ::time_t, + __glibc_reserved4: ::c_uint, + pub shm_segsz: ::size_t, + pub shm_cpid: ::pid_t, + pub shm_lpid: ::pid_t, + pub shm_nattch: ::shmatt_t, + __glibc_reserved5: ::c_ulong, + __glibc_reserved6: ::c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: ::ipc_perm, + __glibc_reserved1: ::c_uint, + pub msg_stime: ::time_t, + __glibc_reserved2: ::c_uint, + pub msg_rtime: ::time_t, + __glibc_reserved3: ::c_uint, + pub msg_ctime: ::time_t, + __msg_cbytes: ::c_ulong, + pub msg_qnum: ::msgqnum_t, + pub msg_qbytes: ::msglen_t, + pub msg_lspid: ::pid_t, + pub msg_lrpid: ::pid_t, + __glibc_reserved4: ::c_ulong, + __glibc_reserved5: ::c_ulong, + } +} + pub const O_DIRECT: ::c_int = 0x20000; pub const O_DIRECTORY: ::c_int = 0x4000; pub const O_NOFOLLOW: ::c_int = 0x8000; @@ -21,3 +92,74 @@ pub const FIONBIO: ::c_ulong = 0x8004667e; pub const SYS_gettid: ::c_long = 207; pub const SYS_perf_event_open: ::c_long = 319; + +pub const MCL_CURRENT: ::c_int = 0x2000; +pub const MCL_FUTURE: ::c_int = 0x4000; + +pub const SIGSTKSZ: ::size_t = 0x4000; +pub const CBAUD: ::tcflag_t = 0xff; +pub const TAB1: ::c_int = 0x400; +pub const TAB2: ::c_int = 0x800; +pub const TAB3: ::c_int = 0xc00; +pub const CR1: ::c_int = 0x1000; +pub const CR2: ::c_int = 0x2000; +pub const CR3: ::c_int = 0x3000; +pub const FF1: ::c_int = 0x4000; +pub const BS1: ::c_int = 0x8000; +pub const VT1: ::c_int = 0x10000; +pub const VWERASE: usize = 0xa; +pub const VREPRINT: usize = 0xb; +pub const VSUSP: usize = 0xc; +pub const VSTART: usize = 0xd; +pub const VSTOP: usize = 0xe; +pub const VDISCARD: usize = 0x10; +pub const VTIME: usize = 0x7; +pub const IXON: ::tcflag_t = 0x200; +pub const IXOFF: ::tcflag_t = 0x400; +pub const ONLCR: ::tcflag_t = 0x2; +pub const CSIZE: ::tcflag_t = 0x300; +pub const CS6: ::tcflag_t = 0x100; +pub const CS7: ::tcflag_t = 0x200; +pub const CS8: ::tcflag_t = 0x300; +pub const CSTOPB: ::tcflag_t = 0x400; +pub const CREAD: ::tcflag_t = 0x800; +pub const PARENB: ::tcflag_t = 0x1000; +pub const PARODD: ::tcflag_t = 0x2000; +pub const HUPCL: ::tcflag_t = 0x4000; +pub const CLOCAL: ::tcflag_t = 0x8000; +pub const ECHOKE: ::tcflag_t = 0x1; +pub const ECHOE: ::tcflag_t = 0x2; +pub const ECHOK: ::tcflag_t = 0x4; +pub const ECHONL: ::tcflag_t = 0x10; +pub const ECHOPRT: ::tcflag_t = 0x20; +pub const ECHOCTL: ::tcflag_t = 0x40; +pub const ISIG: ::tcflag_t = 0x80; +pub const ICANON: ::tcflag_t = 0x100; +pub const PENDIN: ::tcflag_t = 0x20000000; +pub const NOFLSH: ::tcflag_t = 0x80000000; + +pub const VEOL: usize = 6; 
+pub const VEOL2: usize = 8; +pub const VMIN: usize = 5; +pub const IEXTEN: ::tcflag_t = 0x400; +pub const TOSTOP: ::tcflag_t = 0x400000; +pub const FLUSHO: ::tcflag_t = 0x800000; +pub const EXTPROC: ::tcflag_t = 0x10000000; +pub const TCGETS: ::c_ulong = 0x403c7413; +pub const TCSETS: ::c_ulong = 0x803c7414; +pub const TCSETSW: ::c_ulong = 0x803c7415; +pub const TCSETSF: ::c_ulong = 0x803c7416; +pub const TCGETA: ::c_ulong = 0x40147417; +pub const TCSETA: ::c_ulong = 0x80147418; +pub const TCSETAW: ::c_ulong = 0x80147419; +pub const TCSETAF: ::c_ulong = 0x8014741c; +pub const TCSBRK: ::c_ulong = 0x2000741d; +pub const TCXONC: ::c_ulong = 0x2000741e; +pub const TCFLSH: ::c_ulong = 0x2000741f; +pub const TIOCINQ: ::c_ulong = 0x4004667f; +pub const TIOCGPGRP: ::c_ulong = 0x40047477; +pub const TIOCSPGRP: ::c_ulong = 0x80047476; +pub const TIOCOUTQ: ::c_ulong = 0x40047473; +pub const TIOCGWINSZ: ::c_ulong = 0x40087468; +pub const TIOCSWINSZ: ::c_ulong = 0x80087467; +pub const FIONREAD: ::c_ulong = 0x4004667f; diff --git a/src/liblibc/src/unix/notbsd/linux/other/b32/x86.rs b/src/liblibc/src/unix/notbsd/linux/other/b32/x86.rs index e6d78deae3..86abd1eb10 100644 --- a/src/liblibc/src/unix/notbsd/linux/other/b32/x86.rs +++ b/src/liblibc/src/unix/notbsd/linux/other/b32/x86.rs @@ -35,6 +35,75 @@ s! { pub uc_sigmask: ::sigset_t, __private: [u8; 112], } + + pub struct ipc_perm { + pub __key: ::key_t, + pub uid: ::uid_t, + pub gid: ::gid_t, + pub cuid: ::uid_t, + pub cgid: ::gid_t, + pub mode: ::c_ushort, + __pad1: ::c_ushort, + pub __seq: ::c_ushort, + __pad2: ::c_ushort, + __unused1: ::c_ulong, + __unused2: ::c_ulong + } + + pub struct stat64 { + pub st_dev: ::dev_t, + __pad1: ::c_uint, + __st_ino: ::ino_t, + pub st_mode: ::mode_t, + pub st_nlink: ::nlink_t, + pub st_uid: ::uid_t, + pub st_gid: ::gid_t, + pub st_rdev: ::dev_t, + __pad2: ::c_uint, + pub st_size: ::off64_t, + pub st_blksize: ::blksize_t, + pub st_blocks: ::blkcnt64_t, + pub st_atime: ::time_t, + pub st_atime_nsec: ::c_long, + pub st_mtime: ::time_t, + pub st_mtime_nsec: ::c_long, + pub st_ctime: ::time_t, + pub st_ctime_nsec: ::c_long, + pub st_ino: ::ino64_t, + } + + pub struct shmid_ds { + pub shm_perm: ::ipc_perm, + pub shm_segsz: ::size_t, + pub shm_atime: ::time_t, + __unused1: ::c_ulong, + pub shm_dtime: ::time_t, + __unused2: ::c_ulong, + pub shm_ctime: ::time_t, + __unused3: ::c_ulong, + pub shm_cpid: ::pid_t, + pub shm_lpid: ::pid_t, + pub shm_nattch: ::shmatt_t, + __unused4: ::c_ulong, + __unused5: ::c_ulong + } + + pub struct msqid_ds { + pub msg_perm: ::ipc_perm, + pub msg_stime: ::time_t, + __glibc_reserved1: ::c_ulong, + pub msg_rtime: ::time_t, + __glibc_reserved2: ::c_ulong, + pub msg_ctime: ::time_t, + __glibc_reserved3: ::c_ulong, + __msg_cbytes: ::c_ulong, + pub msg_qnum: ::msgqnum_t, + pub msg_qbytes: ::msglen_t, + pub msg_lspid: ::pid_t, + pub msg_lrpid: ::pid_t, + __glibc_reserved4: ::c_ulong, + __glibc_reserved5: ::c_ulong, + } } pub const O_DIRECT: ::c_int = 0x4000; @@ -59,6 +128,80 @@ pub const FIONBIO: ::c_ulong = 0x5421; pub const SYS_gettid: ::c_long = 224; pub const SYS_perf_event_open: ::c_long = 336; +pub const PTRACE_GETFPXREGS: ::c_uint = 18; +pub const PTRACE_SETFPXREGS: ::c_uint = 19; + +pub const MCL_CURRENT: ::c_int = 0x0001; +pub const MCL_FUTURE: ::c_int = 0x0002; + +pub const SIGSTKSZ: ::size_t = 8192; +pub const CBAUD: ::tcflag_t = 0o0010017; +pub const TAB1: ::c_int = 0x00000800; +pub const TAB2: ::c_int = 0x00001000; +pub const TAB3: ::c_int = 0x00001800; +pub const CR1: ::c_int = 0x00000200; 
+pub const CR2: ::c_int = 0x00000400; +pub const CR3: ::c_int = 0x00000600; +pub const FF1: ::c_int = 0x00008000; +pub const BS1: ::c_int = 0x00002000; +pub const VT1: ::c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: ::tcflag_t = 0x00000400; +pub const IXOFF: ::tcflag_t = 0x00001000; +pub const ONLCR: ::tcflag_t = 0x4; +pub const CSIZE: ::tcflag_t = 0x00000030; +pub const CS6: ::tcflag_t = 0x00000010; +pub const CS7: ::tcflag_t = 0x00000020; +pub const CS8: ::tcflag_t = 0x00000030; +pub const CSTOPB: ::tcflag_t = 0x00000040; +pub const CREAD: ::tcflag_t = 0x00000080; +pub const PARENB: ::tcflag_t = 0x00000100; +pub const PARODD: ::tcflag_t = 0x00000200; +pub const HUPCL: ::tcflag_t = 0x00000400; +pub const CLOCAL: ::tcflag_t = 0x00000800; +pub const ECHOKE: ::tcflag_t = 0x00000800; +pub const ECHOE: ::tcflag_t = 0x00000010; +pub const ECHOK: ::tcflag_t = 0x00000020; +pub const ECHONL: ::tcflag_t = 0x00000040; +pub const ECHOPRT: ::tcflag_t = 0x00000400; +pub const ECHOCTL: ::tcflag_t = 0x00000200; +pub const ISIG: ::tcflag_t = 0x00000001; +pub const ICANON: ::tcflag_t = 0x00000002; +pub const PENDIN: ::tcflag_t = 0x00004000; +pub const NOFLSH: ::tcflag_t = 0x00000080; + +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: ::tcflag_t = 0x00008000; +pub const TOSTOP: ::tcflag_t = 0x00000100; +pub const FLUSHO: ::tcflag_t = 0x00001000; +pub const EXTPROC: ::tcflag_t = 0x00010000; +pub const TCGETS: ::c_ulong = 0x5401; +pub const TCSETS: ::c_ulong = 0x5402; +pub const TCSETSW: ::c_ulong = 0x5403; +pub const TCSETSF: ::c_ulong = 0x5404; +pub const TCGETA: ::c_ulong = 0x5405; +pub const TCSETA: ::c_ulong = 0x5406; +pub const TCSETAW: ::c_ulong = 0x5407; +pub const TCSETAF: ::c_ulong = 0x5408; +pub const TCSBRK: ::c_ulong = 0x5409; +pub const TCXONC: ::c_ulong = 0x540A; +pub const TCFLSH: ::c_ulong = 0x540B; +pub const TIOCINQ: ::c_ulong = 0x541B; +pub const TIOCGPGRP: ::c_ulong = 0x540F; +pub const TIOCSPGRP: ::c_ulong = 0x5410; +pub const TIOCOUTQ: ::c_ulong = 0x5411; +pub const TIOCGWINSZ: ::c_ulong = 0x5413; +pub const TIOCSWINSZ: ::c_ulong = 0x5414; +pub const FIONREAD: ::c_ulong = 0x541B; + extern { pub fn getcontext(ucp: *mut ucontext_t) -> ::c_int; pub fn setcontext(ucp: *const ucontext_t) -> ::c_int; diff --git a/src/liblibc/src/unix/notbsd/linux/other/b64/aarch64.rs b/src/liblibc/src/unix/notbsd/linux/other/b64/aarch64.rs index e977868da3..1da95b1020 100644 --- a/src/liblibc/src/unix/notbsd/linux/other/b64/aarch64.rs +++ b/src/liblibc/src/unix/notbsd/linux/other/b64/aarch64.rs @@ -53,6 +53,32 @@ s! 
{ pub struct pthread_attr_t { __size: [u64; 8] } + + pub struct ipc_perm { + pub __key: ::key_t, + pub uid: ::uid_t, + pub gid: ::gid_t, + pub cuid: ::uid_t, + pub cgid: ::gid_t, + pub mode: ::c_uint, + pub __seq: ::c_ushort, + __pad1: ::c_ushort, + __unused1: ::c_ulong, + __unused2: ::c_ulong + } + + pub struct shmid_ds { + pub shm_perm: ::ipc_perm, + pub shm_segsz: ::size_t, + pub shm_atime: ::time_t, + pub shm_dtime: ::time_t, + pub shm_ctime: ::time_t, + pub shm_cpid: ::pid_t, + pub shm_lpid: ::pid_t, + pub shm_nattch: ::shmatt_t, + __unused4: ::c_ulong, + __unused5: ::c_ulong + } } pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 8; @@ -79,3 +105,74 @@ pub const FIONBIO: ::c_ulong = 0x5421; pub const SYS_gettid: ::c_long = 178; pub const SYS_perf_event_open: ::c_long = 241; + +pub const MCL_CURRENT: ::c_int = 0x0001; +pub const MCL_FUTURE: ::c_int = 0x0002; + +pub const SIGSTKSZ: ::size_t = 16384; +pub const CBAUD: ::tcflag_t = 0o0010017; +pub const TAB1: ::c_int = 0x00000800; +pub const TAB2: ::c_int = 0x00001000; +pub const TAB3: ::c_int = 0x00001800; +pub const CR1: ::c_int = 0x00000200; +pub const CR2: ::c_int = 0x00000400; +pub const CR3: ::c_int = 0x00000600; +pub const FF1: ::c_int = 0x00008000; +pub const BS1: ::c_int = 0x00002000; +pub const VT1: ::c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: ::tcflag_t = 0x00000400; +pub const IXOFF: ::tcflag_t = 0x00001000; +pub const ONLCR: ::tcflag_t = 0x4; +pub const CSIZE: ::tcflag_t = 0x00000030; +pub const CS6: ::tcflag_t = 0x00000010; +pub const CS7: ::tcflag_t = 0x00000020; +pub const CS8: ::tcflag_t = 0x00000030; +pub const CSTOPB: ::tcflag_t = 0x00000040; +pub const CREAD: ::tcflag_t = 0x00000080; +pub const PARENB: ::tcflag_t = 0x00000100; +pub const PARODD: ::tcflag_t = 0x00000200; +pub const HUPCL: ::tcflag_t = 0x00000400; +pub const CLOCAL: ::tcflag_t = 0x00000800; +pub const ECHOKE: ::tcflag_t = 0x00000800; +pub const ECHOE: ::tcflag_t = 0x00000010; +pub const ECHOK: ::tcflag_t = 0x00000020; +pub const ECHONL: ::tcflag_t = 0x00000040; +pub const ECHOPRT: ::tcflag_t = 0x00000400; +pub const ECHOCTL: ::tcflag_t = 0x00000200; +pub const ISIG: ::tcflag_t = 0x00000001; +pub const ICANON: ::tcflag_t = 0x00000002; +pub const PENDIN: ::tcflag_t = 0x00004000; +pub const NOFLSH: ::tcflag_t = 0x00000080; + +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: ::tcflag_t = 0x00008000; +pub const TOSTOP: ::tcflag_t = 0x00000100; +pub const FLUSHO: ::tcflag_t = 0x00001000; +pub const EXTPROC: ::tcflag_t = 0x00010000; +pub const TCGETS: ::c_ulong = 0x5401; +pub const TCSETS: ::c_ulong = 0x5402; +pub const TCSETSW: ::c_ulong = 0x5403; +pub const TCSETSF: ::c_ulong = 0x5404; +pub const TCGETA: ::c_ulong = 0x5405; +pub const TCSETA: ::c_ulong = 0x5406; +pub const TCSETAW: ::c_ulong = 0x5407; +pub const TCSETAF: ::c_ulong = 0x5408; +pub const TCSBRK: ::c_ulong = 0x5409; +pub const TCXONC: ::c_ulong = 0x540A; +pub const TCFLSH: ::c_ulong = 0x540B; +pub const TIOCINQ: ::c_ulong = 0x541B; +pub const TIOCGPGRP: ::c_ulong = 0x540F; +pub const TIOCSPGRP: ::c_ulong = 0x5410; +pub const TIOCOUTQ: ::c_ulong = 0x5411; +pub const TIOCGWINSZ: ::c_ulong = 0x5413; +pub const TIOCSWINSZ: ::c_ulong = 0x5414; +pub const FIONREAD: ::c_ulong = 0x541B; diff --git 
a/src/liblibc/src/unix/notbsd/linux/other/b64/mod.rs b/src/liblibc/src/unix/notbsd/linux/other/b64/mod.rs index ccf99881f7..352dba843d 100644 --- a/src/liblibc/src/unix/notbsd/linux/other/b64/mod.rs +++ b/src/liblibc/src/unix/notbsd/linux/other/b64/mod.rs @@ -31,6 +31,20 @@ s! { pub mem_unit: ::c_uint, pub _f: [::c_char; 0], } + + pub struct msqid_ds { + pub msg_perm: ::ipc_perm, + pub msg_stime: ::time_t, + pub msg_rtime: ::time_t, + pub msg_ctime: ::time_t, + __msg_cbytes: ::c_ulong, + pub msg_qnum: ::msgqnum_t, + pub msg_qbytes: ::msglen_t, + pub msg_lspid: ::pid_t, + pub msg_lrpid: ::pid_t, + __glibc_reserved4: ::c_ulong, + __glibc_reserved5: ::c_ulong, + } } pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; diff --git a/src/liblibc/src/unix/notbsd/linux/other/b64/powerpc64.rs b/src/liblibc/src/unix/notbsd/linux/other/b64/powerpc64.rs index fca4204fa0..c5ce962e39 100644 --- a/src/liblibc/src/unix/notbsd/linux/other/b64/powerpc64.rs +++ b/src/liblibc/src/unix/notbsd/linux/other/b64/powerpc64.rs @@ -51,6 +51,32 @@ s! { pub struct pthread_attr_t { __size: [u64; 7] } + + pub struct ipc_perm { + pub __key: ::key_t, + pub uid: ::uid_t, + pub gid: ::gid_t, + pub cuid: ::uid_t, + pub cgid: ::gid_t, + pub mode: ::mode_t, + pub __seq: ::uint32_t, + __pad1: ::uint32_t, + __unused1: ::uint64_t, + __unused2: ::c_ulong, + } + + pub struct shmid_ds { + pub shm_perm: ::ipc_perm, + pub shm_atime: ::time_t, + pub shm_dtime: ::time_t, + pub shm_ctime: ::time_t, + pub shm_segsz: ::size_t, + pub shm_cpid: ::pid_t, + pub shm_lpid: ::pid_t, + pub shm_nattch: ::shmatt_t, + __unused4: ::c_ulong, + __unused5: ::c_ulong + } } pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; @@ -77,3 +103,74 @@ pub const FIONBIO: ::c_ulong = 0x8004667e; pub const SYS_gettid: ::c_long = 207; pub const SYS_perf_event_open: ::c_long = 319; + +pub const MCL_CURRENT: ::c_int = 0x2000; +pub const MCL_FUTURE: ::c_int = 0x4000; + +pub const SIGSTKSZ: ::size_t = 0x4000; +pub const CBAUD: ::tcflag_t = 0xff; +pub const TAB1: ::c_int = 0x400; +pub const TAB2: ::c_int = 0x800; +pub const TAB3: ::c_int = 0xc00; +pub const CR1: ::c_int = 0x1000; +pub const CR2: ::c_int = 0x2000; +pub const CR3: ::c_int = 0x3000; +pub const FF1: ::c_int = 0x4000; +pub const BS1: ::c_int = 0x8000; +pub const VT1: ::c_int = 0x10000; +pub const VWERASE: usize = 0xa; +pub const VREPRINT: usize = 0xb; +pub const VSUSP: usize = 0xc; +pub const VSTART: usize = 0xd; +pub const VSTOP: usize = 0xe; +pub const VDISCARD: usize = 0x10; +pub const VTIME: usize = 0x7; +pub const IXON: ::tcflag_t = 0x200; +pub const IXOFF: ::tcflag_t = 0x400; +pub const ONLCR: ::tcflag_t = 0x2; +pub const CSIZE: ::tcflag_t = 0x300; +pub const CS6: ::tcflag_t = 0x100; +pub const CS7: ::tcflag_t = 0x200; +pub const CS8: ::tcflag_t = 0x300; +pub const CSTOPB: ::tcflag_t = 0x400; +pub const CREAD: ::tcflag_t = 0x800; +pub const PARENB: ::tcflag_t = 0x1000; +pub const PARODD: ::tcflag_t = 0x2000; +pub const HUPCL: ::tcflag_t = 0x4000; +pub const CLOCAL: ::tcflag_t = 0x8000; +pub const ECHOKE: ::tcflag_t = 0x1; +pub const ECHOE: ::tcflag_t = 0x2; +pub const ECHOK: ::tcflag_t = 0x4; +pub const ECHONL: ::tcflag_t = 0x10; +pub const ECHOPRT: ::tcflag_t = 0x20; +pub const ECHOCTL: ::tcflag_t = 0x40; +pub const ISIG: ::tcflag_t = 0x80; +pub const ICANON: ::tcflag_t = 0x100; +pub const PENDIN: ::tcflag_t = 0x20000000; +pub const NOFLSH: ::tcflag_t = 0x80000000; + +pub const VEOL: usize = 6; +pub const VEOL2: usize = 8; +pub const VMIN: usize = 5; +pub const IEXTEN: ::tcflag_t = 0x400; +pub const TOSTOP: 
::tcflag_t = 0x400000; +pub const FLUSHO: ::tcflag_t = 0x800000; +pub const EXTPROC: ::tcflag_t = 0x10000000; +pub const TCGETS: ::c_ulong = 0x403c7413; +pub const TCSETS: ::c_ulong = 0x803c7414; +pub const TCSETSW: ::c_ulong = 0x803c7415; +pub const TCSETSF: ::c_ulong = 0x803c7416; +pub const TCGETA: ::c_ulong = 0x40147417; +pub const TCSETA: ::c_ulong = 0x80147418; +pub const TCSETAW: ::c_ulong = 0x80147419; +pub const TCSETAF: ::c_ulong = 0x8014741c; +pub const TCSBRK: ::c_ulong = 0x2000741d; +pub const TCXONC: ::c_ulong = 0x2000741e; +pub const TCFLSH: ::c_ulong = 0x2000741f; +pub const TIOCINQ: ::c_ulong = 0x4004667f; +pub const TIOCGPGRP: ::c_ulong = 0x40047477; +pub const TIOCSPGRP: ::c_ulong = 0x80047476; +pub const TIOCOUTQ: ::c_ulong = 0x40047473; +pub const TIOCGWINSZ: ::c_ulong = 0x40087468; +pub const TIOCSWINSZ: ::c_ulong = 0x80087467; +pub const FIONREAD: ::c_ulong = 0x4004667f; diff --git a/src/liblibc/src/unix/notbsd/linux/other/b64/x86_64.rs b/src/liblibc/src/unix/notbsd/linux/other/b64/x86_64.rs index 052df2e29f..69295e6d5d 100644 --- a/src/liblibc/src/unix/notbsd/linux/other/b64/x86_64.rs +++ b/src/liblibc/src/unix/notbsd/linux/other/b64/x86_64.rs @@ -91,6 +91,33 @@ s! { pub uc_sigmask: ::sigset_t, __private: [u8; 512], } + + pub struct ipc_perm { + pub __key: ::key_t, + pub uid: ::uid_t, + pub gid: ::gid_t, + pub cuid: ::uid_t, + pub cgid: ::gid_t, + pub mode: ::c_ushort, + __pad1: ::c_ushort, + pub __seq: ::c_ushort, + __pad2: ::c_ushort, + __unused1: ::c_ulong, + __unused2: ::c_ulong + } + + pub struct shmid_ds { + pub shm_perm: ::ipc_perm, + pub shm_segsz: ::size_t, + pub shm_atime: ::time_t, + pub shm_dtime: ::time_t, + pub shm_ctime: ::time_t, + pub shm_cpid: ::pid_t, + pub shm_lpid: ::pid_t, + pub shm_nattch: ::shmatt_t, + __unused4: ::c_ulong, + __unused5: ::c_ulong + } } pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; @@ -122,10 +149,92 @@ pub const PTRACE_GETFPXREGS: ::c_uint = 18; pub const PTRACE_SETFPXREGS: ::c_uint = 19; pub const PTRACE_GETREGS: ::c_uint = 12; pub const PTRACE_SETREGS: ::c_uint = 13; +pub const PTRACE_O_EXITKILL: ::c_uint = 1048576; +pub const PTRACE_O_TRACECLONE: ::c_uint = 8; +pub const PTRACE_O_TRACEEXEC: ::c_uint = 16; +pub const PTRACE_O_TRACEEXIT: ::c_uint = 64; +pub const PTRACE_O_TRACEFORK: ::c_uint = 2; +pub const PTRACE_O_TRACESYSGOOD: ::c_uint = 1; +pub const PTRACE_O_TRACEVFORK: ::c_uint = 4; +pub const PTRACE_O_TRACEVFORKDONE: ::c_uint = 32; +pub const PTRACE_O_TRACESECCOMP: ::c_uint = 128; +pub const PTRACE_O_SUSPEND_SECCOMP: ::c_uint = 2097152; +pub const PTRACE_PEEKSIGINFO_SHARED: ::c_uint = 1; pub const SYS_gettid: ::c_long = 186; pub const SYS_perf_event_open: ::c_long = 298; +pub const MCL_CURRENT: ::c_int = 0x0001; +pub const MCL_FUTURE: ::c_int = 0x0002; + +pub const SIGSTKSZ: ::size_t = 8192; +pub const CBAUD: ::tcflag_t = 0o0010017; +pub const TAB1: ::c_int = 0x00000800; +pub const TAB2: ::c_int = 0x00001000; +pub const TAB3: ::c_int = 0x00001800; +pub const CR1: ::c_int = 0x00000200; +pub const CR2: ::c_int = 0x00000400; +pub const CR3: ::c_int = 0x00000600; +pub const FF1: ::c_int = 0x00008000; +pub const BS1: ::c_int = 0x00002000; +pub const VT1: ::c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: ::tcflag_t = 0x00000400; +pub const IXOFF: ::tcflag_t = 0x00001000; +pub const ONLCR: ::tcflag_t = 0x4; +pub const 
CSIZE: ::tcflag_t = 0x00000030; +pub const CS6: ::tcflag_t = 0x00000010; +pub const CS7: ::tcflag_t = 0x00000020; +pub const CS8: ::tcflag_t = 0x00000030; +pub const CSTOPB: ::tcflag_t = 0x00000040; +pub const CREAD: ::tcflag_t = 0x00000080; +pub const PARENB: ::tcflag_t = 0x00000100; +pub const PARODD: ::tcflag_t = 0x00000200; +pub const HUPCL: ::tcflag_t = 0x00000400; +pub const CLOCAL: ::tcflag_t = 0x00000800; +pub const ECHOKE: ::tcflag_t = 0x00000800; +pub const ECHOE: ::tcflag_t = 0x00000010; +pub const ECHOK: ::tcflag_t = 0x00000020; +pub const ECHONL: ::tcflag_t = 0x00000040; +pub const ECHOPRT: ::tcflag_t = 0x00000400; +pub const ECHOCTL: ::tcflag_t = 0x00000200; +pub const ISIG: ::tcflag_t = 0x00000001; +pub const ICANON: ::tcflag_t = 0x00000002; +pub const PENDIN: ::tcflag_t = 0x00004000; +pub const NOFLSH: ::tcflag_t = 0x00000080; + +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: ::tcflag_t = 0x00008000; +pub const TOSTOP: ::tcflag_t = 0x00000100; +pub const FLUSHO: ::tcflag_t = 0x00001000; +pub const EXTPROC: ::tcflag_t = 0x00010000; +pub const TCGETS: ::c_ulong = 0x5401; +pub const TCSETS: ::c_ulong = 0x5402; +pub const TCSETSW: ::c_ulong = 0x5403; +pub const TCSETSF: ::c_ulong = 0x5404; +pub const TCGETA: ::c_ulong = 0x5405; +pub const TCSETA: ::c_ulong = 0x5406; +pub const TCSETAW: ::c_ulong = 0x5407; +pub const TCSETAF: ::c_ulong = 0x5408; +pub const TCSBRK: ::c_ulong = 0x5409; +pub const TCXONC: ::c_ulong = 0x540A; +pub const TCFLSH: ::c_ulong = 0x540B; +pub const TIOCINQ: ::c_ulong = 0x541B; +pub const TIOCGPGRP: ::c_ulong = 0x540F; +pub const TIOCSPGRP: ::c_ulong = 0x5410; +pub const TIOCOUTQ: ::c_ulong = 0x5411; +pub const TIOCGWINSZ: ::c_ulong = 0x5413; +pub const TIOCSWINSZ: ::c_ulong = 0x5414; +pub const FIONREAD: ::c_ulong = 0x541B; + extern { pub fn getcontext(ucp: *mut ucontext_t) -> ::c_int; pub fn setcontext(ucp: *const ucontext_t) -> ::c_int; diff --git a/src/liblibc/src/unix/notbsd/linux/other/mod.rs b/src/liblibc/src/unix/notbsd/linux/other/mod.rs index 2449c6878a..b100cec54b 100644 --- a/src/liblibc/src/unix/notbsd/linux/other/mod.rs +++ b/src/liblibc/src/unix/notbsd/linux/other/mod.rs @@ -4,6 +4,40 @@ pub type rlim_t = c_ulong; pub type __priority_which_t = ::c_uint; s! { + pub struct __exit_status { + pub e_termination: ::c_short, + pub e_exit: ::c_short, + } + + pub struct __timeval { + pub tv_sec: ::int32_t, + pub tv_usec: ::int32_t, + } + + pub struct utmpx { + pub ut_type: ::c_short, + pub ut_pid: ::pid_t, + pub ut_line: [::c_char; __UT_LINESIZE], + pub ut_id: [::c_char; 4], + + pub ut_user: [::c_char; __UT_NAMESIZE], + pub ut_host: [::c_char; __UT_HOSTSIZE], + pub ut_exit: __exit_status, + + #[cfg(any(target_arch = "aarch64", target_pointer_width = "32"))] + pub ut_session: ::c_long, + #[cfg(any(target_arch = "aarch64", target_pointer_width = "32"))] + pub ut_tv: ::timeval, + + #[cfg(not(any(target_arch = "aarch64", target_pointer_width = "32")))] + pub ut_session: ::int32_t, + #[cfg(not(any(target_arch = "aarch64", target_pointer_width = "32")))] + pub ut_tv: __timeval, + + pub ut_addr_v6: [::int32_t; 4], + __glibc_reserved: [::c_char; 20], + } + pub struct sigaction { pub sa_sigaction: ::sighandler_t, pub sa_mask: ::sigset_t, @@ -89,39 +123,6 @@ s! 
{ pub l_pid: ::pid_t, } - pub struct ipc_perm { - pub __key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::c_ushort, - __pad1: ::c_ushort, - pub __seq: ::c_ushort, - __pad2: ::c_ushort, - __unused1: ::c_ulong, - __unused2: ::c_ulong - } - - pub struct shmid_ds { - pub shm_perm: ::ipc_perm, - pub shm_segsz: ::size_t, - pub shm_atime: ::time_t, - #[cfg(target_pointer_width = "32")] - __unused1: ::c_ulong, - pub shm_dtime: ::time_t, - #[cfg(target_pointer_width = "32")] - __unused2: ::c_ulong, - pub shm_ctime: ::time_t, - #[cfg(target_pointer_width = "32")] - __unused3: ::c_ulong, - pub shm_cpid: ::pid_t, - pub shm_lpid: ::pid_t, - pub shm_nattch: ::shmatt_t, - __unused4: ::c_ulong, - __unused5: ::c_ulong - } - // FIXME this is actually a union pub struct sem_t { #[cfg(target_pointer_width = "32")] @@ -132,6 +133,20 @@ s! { } } +pub const __UT_LINESIZE: usize = 32; +pub const __UT_NAMESIZE: usize = 32; +pub const __UT_HOSTSIZE: usize = 256; +pub const EMPTY: ::c_short = 0; +pub const RUN_LVL: ::c_short = 1; +pub const BOOT_TIME: ::c_short = 2; +pub const NEW_TIME: ::c_short = 3; +pub const OLD_TIME: ::c_short = 4; +pub const INIT_PROCESS: ::c_short = 5; +pub const LOGIN_PROCESS: ::c_short = 6; +pub const USER_PROCESS: ::c_short = 7; +pub const DEAD_PROCESS: ::c_short = 8; +pub const ACCOUNTING: ::c_short = 9; + pub const RLIMIT_RSS: ::c_int = 5; pub const RLIMIT_NOFILE: ::c_int = 7; pub const RLIMIT_AS: ::c_int = 9; @@ -330,19 +345,24 @@ pub const SIG_SETMASK: ::c_int = 2; pub const SIG_BLOCK: ::c_int = 0x000000; pub const SIG_UNBLOCK: ::c_int = 0x01; +pub const POLLRDNORM: ::c_short = 0x040; +pub const POLLWRNORM: ::c_short = 0x100; +pub const POLLRDBAND: ::c_short = 0x080; +pub const POLLWRBAND: ::c_short = 0x200; + pub const FALLOC_FL_KEEP_SIZE: ::c_int = 0x01; pub const FALLOC_FL_PUNCH_HOLE: ::c_int = 0x02; pub const BUFSIZ: ::c_uint = 8192; pub const TMP_MAX: ::c_uint = 238328; pub const FOPEN_MAX: ::c_uint = 16; +pub const POSIX_FADV_DONTNEED: ::c_int = 4; +pub const POSIX_FADV_NOREUSE: ::c_int = 5; pub const POSIX_MADV_DONTNEED: ::c_int = 4; pub const _SC_2_C_VERSION: ::c_int = 96; -pub const RUSAGE_THREAD: ::c_int = 1; pub const O_ACCMODE: ::c_int = 3; pub const O_ASYNC: ::c_int = 0x2000; pub const O_NDELAY: ::c_int = 0x800; -pub const RUSAGE_CHILDREN: ::c_int = -1; pub const ST_RELATIME: ::c_ulong = 4096; pub const NI_MAXHOST: ::socklen_t = 1025; @@ -374,18 +394,10 @@ pub const TMPFS_MAGIC: ::c_long = 0x01021994; pub const USBDEVICE_SUPER_MAGIC: ::c_long = 0x00009fa2; pub const VEOF: usize = 4; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: ::tcflag_t = 0x00008000; -pub const TOSTOP: ::tcflag_t = 0x00000100; -pub const FLUSHO: ::tcflag_t = 0x00001000; pub const IUTF8: ::tcflag_t = 0x00004000; pub const CPU_SETSIZE: ::c_int = 0x400; -pub const EXTPROC: ::tcflag_t = 0x00010000; - pub const QFMT_VFS_V1: ::c_int = 4; pub const PTRACE_TRACEME: ::c_uint = 0; @@ -438,36 +450,18 @@ pub const TCSANOW: ::c_int = 0; pub const TCSADRAIN: ::c_int = 1; pub const TCSAFLUSH: ::c_int = 2; -pub const TCGETS: ::c_ulong = 0x5401; -pub const TCSETS: ::c_ulong = 0x5402; -pub const TCSETSW: ::c_ulong = 0x5403; -pub const TCSETSF: ::c_ulong = 0x5404; -pub const TCGETA: ::c_ulong = 0x5405; -pub const TCSETA: ::c_ulong = 0x5406; -pub const TCSETAW: ::c_ulong = 0x5407; -pub const TCSETAF: ::c_ulong = 0x5408; -pub const TCSBRK: ::c_ulong = 0x5409; -pub const TCXONC: ::c_ulong = 0x540A; -pub 
const TCFLSH: ::c_ulong = 0x540B; pub const TIOCGSOFTCAR: ::c_ulong = 0x5419; pub const TIOCSSOFTCAR: ::c_ulong = 0x541A; -pub const TIOCINQ: ::c_ulong = 0x541B; pub const TIOCLINUX: ::c_ulong = 0x541C; pub const TIOCGSERIAL: ::c_ulong = 0x541E; pub const TIOCEXCL: ::c_ulong = 0x540C; pub const TIOCNXCL: ::c_ulong = 0x540D; pub const TIOCSCTTY: ::c_ulong = 0x540E; -pub const TIOCGPGRP: ::c_ulong = 0x540F; -pub const TIOCSPGRP: ::c_ulong = 0x5410; -pub const TIOCOUTQ: ::c_ulong = 0x5411; pub const TIOCSTI: ::c_ulong = 0x5412; -pub const TIOCGWINSZ: ::c_ulong = 0x5413; -pub const TIOCSWINSZ: ::c_ulong = 0x5414; pub const TIOCMGET: ::c_ulong = 0x5415; pub const TIOCMBIS: ::c_ulong = 0x5416; pub const TIOCMBIC: ::c_ulong = 0x5417; pub const TIOCMSET: ::c_ulong = 0x5418; -pub const FIONREAD: ::c_ulong = 0x541B; pub const TIOCCONS: ::c_ulong = 0x541D; pub const RTLD_DEEPBIND: ::c_int = 0x8; @@ -489,6 +483,69 @@ pub const LINUX_REBOOT_CMD_RESTART2: ::c_int = 0xA1B2C3D4; pub const LINUX_REBOOT_CMD_SW_SUSPEND: ::c_int = 0xD000FCE2; pub const LINUX_REBOOT_CMD_KEXEC: ::c_int = 0x45584543; +pub const NETLINK_ROUTE: ::c_int = 0; +pub const NETLINK_UNUSED: ::c_int = 1; +pub const NETLINK_USERSOCK: ::c_int = 2; +pub const NETLINK_FIREWALL: ::c_int = 3; +pub const NETLINK_SOCK_DIAG: ::c_int = 4; +pub const NETLINK_NFLOG: ::c_int = 5; +pub const NETLINK_XFRM: ::c_int = 6; +pub const NETLINK_SELINUX: ::c_int = 7; +pub const NETLINK_ISCSI: ::c_int = 8; +pub const NETLINK_AUDIT: ::c_int = 9; +pub const NETLINK_FIB_LOOKUP: ::c_int = 10; +pub const NETLINK_CONNECTOR: ::c_int = 11; +pub const NETLINK_NETFILTER: ::c_int = 12; +pub const NETLINK_IP6_FW: ::c_int = 13; +pub const NETLINK_DNRTMSG: ::c_int = 14; +pub const NETLINK_KOBJECT_UEVENT: ::c_int = 15; +pub const NETLINK_GENERIC: ::c_int = 16; +pub const NETLINK_SCSITRANSPORT: ::c_int = 18; +pub const NETLINK_ECRYPTFS: ::c_int = 19; +pub const NETLINK_RDMA: ::c_int = 20; +pub const NETLINK_CRYPTO: ::c_int = 21; +pub const NETLINK_INET_DIAG: ::c_int = NETLINK_SOCK_DIAG; + +pub const MAX_LINKS: ::c_int = 32; + +pub const NLM_F_REQUEST: ::c_int = 1; +pub const NLM_F_MULTI: ::c_int = 2; +pub const NLM_F_ACK: ::c_int = 4; +pub const NLM_F_ECHO: ::c_int = 8; +pub const NLM_F_DUMP_INTR: ::c_int = 16; +pub const NLM_F_DUMP_FILTERED: ::c_int = 32; + +pub const NLM_F_ROOT: ::c_int = 0x100; +pub const NLM_F_MATCH: ::c_int = 0x200; +pub const NLM_F_ATOMIC: ::c_int = 0x400; +pub const NLM_F_DUMP: ::c_int = NLM_F_ROOT | NLM_F_MATCH; + +pub const NLM_F_REPLACE: ::c_int = 0x100; +pub const NLM_F_EXCL: ::c_int = 0x200; +pub const NLM_F_CREATE: ::c_int = 0x400; +pub const NLM_F_APPEND: ::c_int = 0x800; + +pub const NLMSG_NOOP: ::c_int = 0x1; +pub const NLMSG_ERROR: ::c_int = 0x2; +pub const NLMSG_DONE: ::c_int = 0x3; +pub const NLMSG_OVERRUN: ::c_int = 0x4; +pub const NLMSG_MIN_TYPE: ::c_int = 0x10; + +pub const NETLINK_ADD_MEMBERSHIP: ::c_int = 1; +pub const NETLINK_DROP_MEMBERSHIP: ::c_int = 2; +pub const NETLINK_PKTINFO: ::c_int = 3; +pub const NETLINK_BROADCAST_ERROR: ::c_int = 4; +pub const NETLINK_NO_ENOBUFS: ::c_int = 5; +pub const NETLINK_RX_RING: ::c_int = 6; +pub const NETLINK_TX_RING: ::c_int = 7; +pub const NETLINK_LISTEN_ALL_NSID: ::c_int = 8; +pub const NETLINK_LIST_MEMBERSHIPS: ::c_int = 9; +pub const NETLINK_CAP_ACK: ::c_int = 10; + +pub const NLA_F_NESTED: ::c_int = 1 << 15; +pub const NLA_F_NET_BYTEORDER: ::c_int = 1 << 14; +pub const NLA_TYPE_MASK: ::c_int = !(NLA_F_NESTED | NLA_F_NET_BYTEORDER); + cfg_if! 
{ if #[cfg(any(target_arch = "arm", target_arch = "x86", target_arch = "x86_64"))] { @@ -498,6 +555,16 @@ cfg_if! { } } +extern { + pub fn utmpxname(file: *const ::c_char) -> ::c_int; + pub fn getutxent() -> *mut utmpx; + pub fn getutxid(ut: *const utmpx) -> *mut utmpx; + pub fn getutxline(ut: *const utmpx) -> *mut utmpx; + pub fn pututxline(ut: *const utmpx) -> *mut utmpx; + pub fn setutxent(); + pub fn endutxent(); +} + #[link(name = "util")] extern { pub fn sysctl(name: *mut ::c_int, diff --git a/src/liblibc/src/unix/notbsd/linux/s390x.rs b/src/liblibc/src/unix/notbsd/linux/s390x.rs new file mode 100644 index 0000000000..be12d72fc5 --- /dev/null +++ b/src/liblibc/src/unix/notbsd/linux/s390x.rs @@ -0,0 +1,679 @@ +pub type blkcnt_t = i64; +pub type blksize_t = i64; +pub type c_char = u8; +pub type c_long = i64; +pub type c_ulong = u64; +pub type fsblkcnt_t = u64; +pub type fsfilcnt_t = u64; +pub type ino_t = u64; +pub type nlink_t = u64; +pub type off_t = i64; +pub type rlim_t = u64; +pub type suseconds_t = i64; +pub type time_t = i64; +pub type wchar_t = i32; +pub type greg_t = u64; +pub type clock_t = i64; +pub type __fsword_t = ::c_long; +pub type __priority_which_t = ::c_uint; + +s! { + pub struct stat { + pub st_dev: ::dev_t, + pub st_ino: ::ino_t, + pub st_nlink: ::nlink_t, + pub st_mode: ::mode_t, + pub st_uid: ::uid_t, + pub st_gid: ::gid_t, + st_pad0: ::c_int, + pub st_rdev: ::dev_t, + pub st_size: ::off_t, + pub st_atime: ::time_t, + pub st_atime_nsec: ::c_long, + pub st_mtime: ::time_t, + pub st_mtime_nsec: ::c_long, + pub st_ctime: ::time_t, + pub st_ctime_nsec: ::c_long, + pub st_blksize: ::blksize_t, + pub st_blocks: ::blkcnt_t, + __glibc_reserved: [::c_long; 3], + } + + pub struct stat64 { + pub st_dev: ::dev_t, + pub st_ino: ::ino64_t, + pub st_nlink: ::nlink_t, + pub st_mode: ::mode_t, + pub st_uid: ::uid_t, + pub st_gid: ::gid_t, + st_pad0: ::c_int, + pub st_rdev: ::dev_t, + pub st_size: ::off_t, + pub st_atime: ::time_t, + pub st_atime_nsec: ::c_long, + pub st_mtime: ::time_t, + pub st_mtime_nsec: ::c_long, + pub st_ctime: ::time_t, + pub st_ctime_nsec: ::c_long, + pub st_blksize: ::blksize_t, + pub st_blocks: ::blkcnt64_t, + __glibc_reserved: [::c_long; 3], + } + + pub struct pthread_attr_t { + __size: [::c_ulong; 7] + } + + pub struct sigaction { + pub sa_sigaction: ::sighandler_t, + __glibc_reserved0: ::c_int, + pub sa_flags: ::c_int, + _restorer: *mut ::c_void, + pub sa_mask: sigset_t, + } + + pub struct stack_t { + pub ss_sp: *mut ::c_void, + pub ss_flags: ::c_int, + pub ss_size: ::size_t, + } + + pub struct sigset_t { + __size: [::c_ulong; 16], + } + + pub struct siginfo_t { + pub si_signo: ::c_int, + pub si_errno: ::c_int, + pub si_code: ::c_int, + _pad: ::c_int, + _pad2: [::c_long; 14], + } + + pub struct ipc_perm { + pub __key: ::key_t, + pub uid: ::uid_t, + pub gid: ::gid_t, + pub cuid: ::uid_t, + pub cgid: ::gid_t, + pub mode: ::mode_t, + pub __seq: ::c_ushort, + __pad1: ::c_ushort, + __unused1: ::c_ulong, + __unused2: ::c_ulong + } + + pub struct shmid_ds { + pub shm_perm: ::ipc_perm, + pub shm_segsz: ::size_t, + pub shm_atime: ::time_t, + pub shm_dtime: ::time_t, + pub shm_ctime: ::time_t, + pub shm_cpid: ::pid_t, + pub shm_lpid: ::pid_t, + pub shm_nattch: ::shmatt_t, + __unused4: ::c_ulong, + __unused5: ::c_ulong + } + + pub struct statfs { + pub f_type: ::c_uint, + pub f_bsize: ::c_uint, + pub f_blocks: ::fsblkcnt_t, + pub f_bfree: ::fsblkcnt_t, + pub f_bavail: ::fsblkcnt_t, + pub f_files: ::fsfilcnt_t, + pub f_ffree: ::fsfilcnt_t, + pub f_fsid: ::fsid_t, 
+ pub f_namelen: ::c_uint, + pub f_frsize: ::c_uint, + pub f_flags: ::c_uint, + f_spare: [::c_uint; 4], + } + + pub struct msghdr { + pub msg_name: *mut ::c_void, + pub msg_namelen: ::socklen_t, + pub msg_iov: *mut ::iovec, + pub msg_iovlen: ::size_t, + pub msg_control: *mut ::c_void, + pub msg_controllen: ::size_t, + pub msg_flags: ::c_int, + } + + pub struct termios { + pub c_iflag: ::tcflag_t, + pub c_oflag: ::tcflag_t, + pub c_cflag: ::tcflag_t, + pub c_lflag: ::tcflag_t, + pub c_line: ::cc_t, + pub c_cc: [::cc_t; ::NCCS], + pub c_ispeed: ::speed_t, + pub c_ospeed: ::speed_t, + } + + pub struct sysinfo { + pub uptime: ::c_long, + pub loads: [::c_ulong; 3], + pub totalram: ::c_ulong, + pub freeram: ::c_ulong, + pub sharedram: ::c_ulong, + pub bufferram: ::c_ulong, + pub totalswap: ::c_ulong, + pub freeswap: ::c_ulong, + pub procs: ::c_ushort, + pub pad: ::c_ushort, + pub totalhigh: ::c_ulong, + pub freehigh: ::c_ulong, + pub mem_unit: ::c_uint, + pub _f: [::c_char; 0], + } + + pub struct glob64_t { + pub gl_pathc: ::size_t, + pub gl_pathv: *mut *mut ::c_char, + pub gl_offs: ::size_t, + pub gl_flags: ::c_int, + + __unused1: *mut ::c_void, + __unused2: *mut ::c_void, + __unused3: *mut ::c_void, + __unused4: *mut ::c_void, + __unused5: *mut ::c_void, + } + + pub struct ucred { + pub pid: ::pid_t, + pub uid: ::uid_t, + pub gid: ::gid_t, + } + + pub struct flock { + pub l_type: ::c_short, + pub l_whence: ::c_short, + pub l_start: ::off_t, + pub l_len: ::off_t, + pub l_pid: ::pid_t, + } + + // FIXME this is actually a union + pub struct sem_t { + __size: [::c_char; 32], + __align: [::c_long; 0], + } + + pub struct __psw_t { + pub mask: u64, + pub addr: u64, + } + + // FIXME: This is actually a union. + pub struct fpreg_t { + pub d: ::c_double, + // f: ::c_float, + } + + pub struct fpregset_t { + pub fpc: u32, + __pad: u32, + pub fprs: [fpreg_t; 16], + } + + pub struct mcontext_t { + pub psw: __psw_t, + pub gregs: [u64; 16], + pub aregs: [u32; 16], + pub fpregs: fpregset_t, + } + + pub struct ucontext_t { + pub uc_flags: ::c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: ::stack_t, + pub uc_mcontext: mcontext_t, + pub uc_sigmask: ::sigset_t, + } + + pub struct msqid_ds { + pub msg_perm: ::ipc_perm, + pub msg_stime: ::time_t, + pub msg_rtime: ::time_t, + pub msg_ctime: ::time_t, + __msg_cbytes: ::c_ulong, + pub msg_qnum: ::msgqnum_t, + pub msg_qbytes: ::msglen_t, + pub msg_lspid: ::pid_t, + pub msg_lrpid: ::pid_t, + __glibc_reserved4: ::c_ulong, + __glibc_reserved5: ::c_ulong, + } +} + +pub const POSIX_FADV_DONTNEED: ::c_int = 6; +pub const POSIX_FADV_NOREUSE: ::c_int = 7; + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; + +pub const EADDRINUSE: ::c_int = 98; +pub const EADDRNOTAVAIL: ::c_int = 99; +pub const ECONNABORTED: ::c_int = 103; +pub const ECONNREFUSED: ::c_int = 111; +pub const ECONNRESET: ::c_int = 104; +pub const EDEADLK: ::c_int = 35; +pub const ENOSYS: ::c_int = 38; +pub const ENOTCONN: ::c_int = 107; +pub const ETIMEDOUT: ::c_int = 110; +pub const FIOCLEX: ::c_ulong = 0x5451; +pub const FIONBIO: ::c_ulong = 0x5421; +pub const MAP_ANON: ::c_int = 0x20; +pub const O_ACCMODE: ::c_int = 3; +pub const O_APPEND: ::c_int = 1024; +pub const O_CREAT: ::c_int = 64; +pub const O_EXCL: ::c_int = 128; +pub const O_NONBLOCK: ::c_int = 2048; +pub const PTHREAD_STACK_MIN: ::size_t = 16384; +pub const RLIM_INFINITY: ::rlim_t = 
0xffffffffffffffff; +pub const SA_NOCLDWAIT: ::c_int = 2; +pub const SA_ONSTACK: ::c_int = 0x08000000; +pub const SA_SIGINFO: ::c_int = 4; +pub const SIGBUS: ::c_int = 7; +pub const SIGSTKSZ: ::size_t = 0x2000; +pub const SIG_SETMASK: ::c_int = 2; +pub const SOCK_DGRAM: ::c_int = 2; +pub const SOCK_STREAM: ::c_int = 1; +pub const SOL_SOCKET: ::c_int = 1; +pub const SO_BROADCAST: ::c_int = 6; +pub const SO_ERROR: ::c_int = 4; +pub const SO_RCVTIMEO: ::c_int = 20; +pub const SO_REUSEADDR: ::c_int = 2; +pub const SO_SNDTIMEO: ::c_int = 21; + +pub const RLIMIT_RSS: ::c_int = 5; +pub const RLIMIT_NOFILE: ::c_int = 7; +pub const RLIMIT_AS: ::c_int = 9; +pub const RLIMIT_NPROC: ::c_int = 6; +pub const RLIMIT_MEMLOCK: ::c_int = 8; +pub const RLIMIT_RTTIME: ::c_int = 15; +pub const RLIMIT_NLIMITS: ::c_int = 16; + +pub const O_NOCTTY: ::c_int = 256; +pub const O_SYNC: ::c_int = 1052672; +pub const O_RSYNC: ::c_int = 1052672; +pub const O_DSYNC: ::c_int = 4096; +pub const O_FSYNC: ::c_int = 0x101000; +pub const O_DIRECT: ::c_int = 0x4000; +pub const O_DIRECTORY: ::c_int = 0x10000; +pub const O_NOFOLLOW: ::c_int = 0x20000; + +pub const SOCK_NONBLOCK: ::c_int = O_NONBLOCK; + +pub const LC_PAPER: ::c_int = 7; +pub const LC_NAME: ::c_int = 8; +pub const LC_ADDRESS: ::c_int = 9; +pub const LC_TELEPHONE: ::c_int = 10; +pub const LC_MEASUREMENT: ::c_int = 11; +pub const LC_IDENTIFICATION: ::c_int = 12; +pub const LC_PAPER_MASK: ::c_int = (1 << LC_PAPER); +pub const LC_NAME_MASK: ::c_int = (1 << LC_NAME); +pub const LC_ADDRESS_MASK: ::c_int = (1 << LC_ADDRESS); +pub const LC_TELEPHONE_MASK: ::c_int = (1 << LC_TELEPHONE); +pub const LC_MEASUREMENT_MASK: ::c_int = (1 << LC_MEASUREMENT); +pub const LC_IDENTIFICATION_MASK: ::c_int = (1 << LC_IDENTIFICATION); +pub const LC_ALL_MASK: ::c_int = ::LC_CTYPE_MASK + | ::LC_NUMERIC_MASK + | ::LC_TIME_MASK + | ::LC_COLLATE_MASK + | ::LC_MONETARY_MASK + | ::LC_MESSAGES_MASK + | LC_PAPER_MASK + | LC_NAME_MASK + | LC_ADDRESS_MASK + | LC_TELEPHONE_MASK + | LC_MEASUREMENT_MASK + | LC_IDENTIFICATION_MASK; + +pub const MAP_ANONYMOUS: ::c_int = 0x0020; +pub const MAP_GROWSDOWN: ::c_int = 0x0100; +pub const MAP_DENYWRITE: ::c_int = 0x0800; +pub const MAP_EXECUTABLE: ::c_int = 0x01000; +pub const MAP_LOCKED: ::c_int = 0x02000; +pub const MAP_NORESERVE: ::c_int = 0x04000; +pub const MAP_POPULATE: ::c_int = 0x08000; +pub const MAP_NONBLOCK: ::c_int = 0x010000; +pub const MAP_STACK: ::c_int = 0x020000; + +pub const EDEADLOCK: ::c_int = 35; +pub const ENAMETOOLONG: ::c_int = 36; +pub const ENOLCK: ::c_int = 37; +pub const ENOTEMPTY: ::c_int = 39; +pub const ELOOP: ::c_int = 40; +pub const ENOMSG: ::c_int = 42; +pub const EIDRM: ::c_int = 43; +pub const ECHRNG: ::c_int = 44; +pub const EL2NSYNC: ::c_int = 45; +pub const EL3HLT: ::c_int = 46; +pub const EL3RST: ::c_int = 47; +pub const ELNRNG: ::c_int = 48; +pub const EUNATCH: ::c_int = 49; +pub const ENOCSI: ::c_int = 50; +pub const EL2HLT: ::c_int = 51; +pub const EBADE: ::c_int = 52; +pub const EBADR: ::c_int = 53; +pub const EXFULL: ::c_int = 54; +pub const ENOANO: ::c_int = 55; +pub const EBADRQC: ::c_int = 56; +pub const EBADSLT: ::c_int = 57; +pub const EMULTIHOP: ::c_int = 72; +pub const EOVERFLOW: ::c_int = 75; +pub const ENOTUNIQ: ::c_int = 76; +pub const EBADFD: ::c_int = 77; +pub const EBADMSG: ::c_int = 74; +pub const EREMCHG: ::c_int = 78; +pub const ELIBACC: ::c_int = 79; +pub const ELIBBAD: ::c_int = 80; +pub const ELIBSCN: ::c_int = 81; +pub const ELIBMAX: ::c_int = 82; +pub const ELIBEXEC: ::c_int = 83; +pub const 
EILSEQ: ::c_int = 84; +pub const ERESTART: ::c_int = 85; +pub const ESTRPIPE: ::c_int = 86; +pub const EUSERS: ::c_int = 87; +pub const ENOTSOCK: ::c_int = 88; +pub const EDESTADDRREQ: ::c_int = 89; +pub const EMSGSIZE: ::c_int = 90; +pub const EPROTOTYPE: ::c_int = 91; +pub const ENOPROTOOPT: ::c_int = 92; +pub const EPROTONOSUPPORT: ::c_int = 93; +pub const ESOCKTNOSUPPORT: ::c_int = 94; +pub const EOPNOTSUPP: ::c_int = 95; +pub const EPFNOSUPPORT: ::c_int = 96; +pub const EAFNOSUPPORT: ::c_int = 97; +pub const ENETDOWN: ::c_int = 100; +pub const ENETUNREACH: ::c_int = 101; +pub const ENETRESET: ::c_int = 102; +pub const ENOBUFS: ::c_int = 105; +pub const EISCONN: ::c_int = 106; +pub const ESHUTDOWN: ::c_int = 108; +pub const ETOOMANYREFS: ::c_int = 109; +pub const EHOSTDOWN: ::c_int = 112; +pub const EHOSTUNREACH: ::c_int = 113; +pub const EALREADY: ::c_int = 114; +pub const EINPROGRESS: ::c_int = 115; +pub const ESTALE: ::c_int = 116; +pub const EUCLEAN: ::c_int = 117; +pub const ENOTNAM: ::c_int = 118; +pub const ENAVAIL: ::c_int = 119; +pub const EISNAM: ::c_int = 120; +pub const EREMOTEIO: ::c_int = 121; +pub const EDQUOT: ::c_int = 122; +pub const ENOMEDIUM: ::c_int = 123; +pub const EMEDIUMTYPE: ::c_int = 124; +pub const ECANCELED: ::c_int = 125; +pub const ENOKEY: ::c_int = 126; +pub const EKEYEXPIRED: ::c_int = 127; +pub const EKEYREVOKED: ::c_int = 128; +pub const EKEYREJECTED: ::c_int = 129; +pub const EOWNERDEAD: ::c_int = 130; +pub const ENOTRECOVERABLE: ::c_int = 131; +pub const EHWPOISON: ::c_int = 133; +pub const ERFKILL: ::c_int = 132; + +pub const SOCK_SEQPACKET: ::c_int = 5; + +pub const SO_TYPE: ::c_int = 3; +pub const SO_DONTROUTE: ::c_int = 5; +pub const SO_SNDBUF: ::c_int = 7; +pub const SO_RCVBUF: ::c_int = 8; +pub const SO_KEEPALIVE: ::c_int = 9; +pub const SO_OOBINLINE: ::c_int = 10; +pub const SO_LINGER: ::c_int = 13; +pub const SO_REUSEPORT: ::c_int = 15; +pub const SO_PEERCRED: ::c_int = 17; +pub const SO_RCVLOWAT: ::c_int = 18; +pub const SO_SNDLOWAT: ::c_int = 19; +pub const SO_ACCEPTCONN: ::c_int = 30; + +pub const TCP_COOKIE_TRANSACTIONS: ::c_int = 15; +pub const TCP_THIN_LINEAR_TIMEOUTS: ::c_int = 16; +pub const TCP_THIN_DUPACK: ::c_int = 17; +pub const TCP_USER_TIMEOUT: ::c_int = 18; +pub const TCP_REPAIR: ::c_int = 19; +pub const TCP_REPAIR_QUEUE: ::c_int = 20; +pub const TCP_QUEUE_SEQ: ::c_int = 21; +pub const TCP_REPAIR_OPTIONS: ::c_int = 22; +pub const TCP_FASTOPEN: ::c_int = 23; +pub const TCP_TIMESTAMP: ::c_int = 24; + +pub const SIGCHLD: ::c_int = 17; +pub const SIGUSR1: ::c_int = 10; +pub const SIGUSR2: ::c_int = 12; +pub const SIGCONT: ::c_int = 18; +pub const SIGSTOP: ::c_int = 19; +pub const SIGTSTP: ::c_int = 20; +pub const SIGURG: ::c_int = 23; +pub const SIGIO: ::c_int = 29; +pub const SIGSYS: ::c_int = 31; +pub const SIGSTKFLT: ::c_int = 16; +pub const SIGUNUSED: ::c_int = 31; +pub const SIGTTIN: ::c_int = 21; +pub const SIGTTOU: ::c_int = 22; +pub const SIGXCPU: ::c_int = 24; +pub const SIGXFSZ: ::c_int = 25; +pub const SIGVTALRM: ::c_int = 26; +pub const SIGPROF: ::c_int = 27; +pub const SIGWINCH: ::c_int = 28; +pub const SIGPOLL: ::c_int = 29; +pub const SIGPWR: ::c_int = 30; +pub const SIG_BLOCK: ::c_int = 0x000000; +pub const SIG_UNBLOCK: ::c_int = 0x01; + +pub const FALLOC_FL_KEEP_SIZE: ::c_int = 0x01; +pub const FALLOC_FL_PUNCH_HOLE: ::c_int = 0x02; + +pub const BUFSIZ: ::c_uint = 8192; +pub const TMP_MAX: ::c_uint = 238328; +pub const FOPEN_MAX: ::c_uint = 16; +pub const POSIX_MADV_DONTNEED: ::c_int = 4; +pub const 
_SC_2_C_VERSION: ::c_int = 96; +pub const O_ASYNC: ::c_int = 0x2000; +pub const O_NDELAY: ::c_int = 0x800; +pub const ST_RELATIME: ::c_ulong = 4096; +pub const NI_MAXHOST: ::socklen_t = 1025; + +pub const ADFS_SUPER_MAGIC: ::c_int = 0x0000adf5; +pub const AFFS_SUPER_MAGIC: ::c_int = 0x0000adff; +pub const CODA_SUPER_MAGIC: ::c_int = 0x73757245; +pub const CRAMFS_MAGIC: ::c_int = 0x28cd3d45; +pub const EFS_SUPER_MAGIC: ::c_int = 0x00414a53; +pub const EXT2_SUPER_MAGIC: ::c_int = 0x0000ef53; +pub const EXT3_SUPER_MAGIC: ::c_int = 0x0000ef53; +pub const EXT4_SUPER_MAGIC: ::c_int = 0x0000ef53; +pub const HPFS_SUPER_MAGIC: ::c_int = 0xf995e849; +pub const HUGETLBFS_MAGIC: ::c_int = 0x958458f6; +pub const ISOFS_SUPER_MAGIC: ::c_int = 0x00009660; +pub const JFFS2_SUPER_MAGIC: ::c_int = 0x000072b6; +pub const MINIX_SUPER_MAGIC: ::c_int = 0x0000137f; +pub const MINIX_SUPER_MAGIC2: ::c_int = 0x0000138f; +pub const MINIX2_SUPER_MAGIC: ::c_int = 0x00002468; +pub const MINIX2_SUPER_MAGIC2: ::c_int = 0x00002478; +pub const MSDOS_SUPER_MAGIC: ::c_int = 0x00004d44; +pub const NCP_SUPER_MAGIC: ::c_int = 0x0000564c; +pub const NFS_SUPER_MAGIC: ::c_int = 0x00006969; +pub const OPENPROM_SUPER_MAGIC: ::c_int = 0x00009fa1; +pub const PROC_SUPER_MAGIC: ::c_int = 0x00009fa0; +pub const QNX4_SUPER_MAGIC: ::c_int = 0x0000002f; +pub const REISERFS_SUPER_MAGIC: ::c_int = 0x52654973; +pub const SMB_SUPER_MAGIC: ::c_int = 0x0000517b; +pub const TMPFS_MAGIC: ::c_int = 0x01021994; +pub const USBDEVICE_SUPER_MAGIC: ::c_int = 0x00009fa2; + +pub const VEOF: usize = 4; +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: ::tcflag_t = 0x00008000; +pub const TOSTOP: ::tcflag_t = 0x00000100; +pub const FLUSHO: ::tcflag_t = 0x00001000; +pub const IUTF8: ::tcflag_t = 0x00004000; + +pub const CPU_SETSIZE: ::c_int = 0x400; + +pub const EXTPROC: ::tcflag_t = 0x00010000; + +pub const QFMT_VFS_V1: ::c_int = 4; + +pub const PTRACE_TRACEME: ::c_uint = 0; +pub const PTRACE_PEEKTEXT: ::c_uint = 1; +pub const PTRACE_PEEKDATA: ::c_uint = 2; +pub const PTRACE_PEEKUSER: ::c_uint = 3; +pub const PTRACE_POKETEXT: ::c_uint = 4; +pub const PTRACE_POKEDATA: ::c_uint = 5; +pub const PTRACE_POKEUSER: ::c_uint = 6; +pub const PTRACE_CONT: ::c_uint = 7; +pub const PTRACE_KILL: ::c_uint = 8; +pub const PTRACE_SINGLESTEP: ::c_uint = 9; +pub const PTRACE_GETREGS: ::c_uint = 12; +pub const PTRACE_SETREGS: ::c_uint = 13; +pub const PTRACE_GETFPREGS: ::c_uint = 14; +pub const PTRACE_SETFPREGS: ::c_uint = 15; +pub const PTRACE_ATTACH: ::c_uint = 16; +pub const PTRACE_DETACH: ::c_uint = 17; +pub const PTRACE_SYSCALL: ::c_uint = 24; +pub const PTRACE_SETOPTIONS: ::c_uint = 0x4200; +pub const PTRACE_GETEVENTMSG: ::c_uint = 0x4201; +pub const PTRACE_GETSIGINFO: ::c_uint = 0x4202; +pub const PTRACE_SETSIGINFO: ::c_uint = 0x4203; +pub const PTRACE_GETREGSET: ::c_uint = 0x4204; +pub const PTRACE_SETREGSET: ::c_uint = 0x4205; +pub const PTRACE_SEIZE: ::c_uint = 0x4206; +pub const PTRACE_INTERRUPT: ::c_uint = 0x4207; +pub const PTRACE_LISTEN: ::c_uint = 0x4208; +pub const PTRACE_PEEKSIGINFO: ::c_uint = 0x4209; + +pub const MADV_DODUMP: ::c_int = 17; +pub const MADV_DONTDUMP: ::c_int = 16; + +pub const EPOLLWAKEUP: ::c_int = 0x20000000; + +pub const MADV_HUGEPAGE: ::c_int = 14; +pub const MADV_NOHUGEPAGE: ::c_int = 15; +pub const MAP_HUGETLB: ::c_int = 0x040000; + +pub const EFD_NONBLOCK: ::c_int = 0x800; + +pub const F_GETLK: ::c_int = 5; +pub const F_GETOWN: ::c_int = 9; +pub const F_SETOWN: ::c_int = 8; +pub 
const F_SETLK: ::c_int = 6; +pub const F_SETLKW: ::c_int = 7; + +pub const SEEK_DATA: ::c_int = 3; +pub const SEEK_HOLE: ::c_int = 4; + +pub const SFD_NONBLOCK: ::c_int = 0x0800; + +pub const TCSANOW: ::c_int = 0; +pub const TCSADRAIN: ::c_int = 1; +pub const TCSAFLUSH: ::c_int = 2; + +pub const TCGETS: ::c_ulong = 0x5401; +pub const TCSETS: ::c_ulong = 0x5402; +pub const TCSETSW: ::c_ulong = 0x5403; +pub const TCSETSF: ::c_ulong = 0x5404; +pub const TCGETA: ::c_ulong = 0x5405; +pub const TCSETA: ::c_ulong = 0x5406; +pub const TCSETAW: ::c_ulong = 0x5407; +pub const TCSETAF: ::c_ulong = 0x5408; +pub const TCSBRK: ::c_ulong = 0x5409; +pub const TCXONC: ::c_ulong = 0x540A; +pub const TCFLSH: ::c_ulong = 0x540B; +pub const TIOCGSOFTCAR: ::c_ulong = 0x5419; +pub const TIOCSSOFTCAR: ::c_ulong = 0x541A; +pub const TIOCINQ: ::c_ulong = 0x541B; +pub const TIOCLINUX: ::c_ulong = 0x541C; +pub const TIOCGSERIAL: ::c_ulong = 0x541E; +pub const TIOCEXCL: ::c_ulong = 0x540C; +pub const TIOCNXCL: ::c_ulong = 0x540D; +pub const TIOCSCTTY: ::c_ulong = 0x540E; +pub const TIOCGPGRP: ::c_ulong = 0x540F; +pub const TIOCSPGRP: ::c_ulong = 0x5410; +pub const TIOCOUTQ: ::c_ulong = 0x5411; +pub const TIOCSTI: ::c_ulong = 0x5412; +pub const TIOCGWINSZ: ::c_ulong = 0x5413; +pub const TIOCSWINSZ: ::c_ulong = 0x5414; +pub const TIOCMGET: ::c_ulong = 0x5415; +pub const TIOCMBIS: ::c_ulong = 0x5416; +pub const TIOCMBIC: ::c_ulong = 0x5417; +pub const TIOCMSET: ::c_ulong = 0x5418; +pub const FIONREAD: ::c_ulong = 0x541B; +pub const TIOCCONS: ::c_ulong = 0x541D; + +pub const RTLD_DEEPBIND: ::c_int = 0x8; +pub const RTLD_GLOBAL: ::c_int = 0x100; +pub const RTLD_NOLOAD: ::c_int = 0x4; + +pub const LINUX_REBOOT_MAGIC1: ::c_int = 0xfee1dead; +pub const LINUX_REBOOT_MAGIC2: ::c_int = 672274793; +pub const LINUX_REBOOT_MAGIC2A: ::c_int = 85072278; +pub const LINUX_REBOOT_MAGIC2B: ::c_int = 369367448; +pub const LINUX_REBOOT_MAGIC2C: ::c_int = 537993216; + +pub const LINUX_REBOOT_CMD_RESTART: ::c_int = 0x01234567; +pub const LINUX_REBOOT_CMD_HALT: ::c_int = 0xCDEF0123; +pub const LINUX_REBOOT_CMD_CAD_ON: ::c_int = 0x89ABCDEF; +pub const LINUX_REBOOT_CMD_CAD_OFF: ::c_int = 0x00000000; +pub const LINUX_REBOOT_CMD_POWER_OFF: ::c_int = 0x4321FEDC; +pub const LINUX_REBOOT_CMD_RESTART2: ::c_int = 0xA1B2C3D4; +pub const LINUX_REBOOT_CMD_SW_SUSPEND: ::c_int = 0xD000FCE2; +pub const LINUX_REBOOT_CMD_KEXEC: ::c_int = 0x45584543; + +pub const SYS_gettid: ::c_long = 236; +pub const SYS_perf_event_open: ::c_long = 331; + +#[link(name = "util")] +extern { + pub fn sysctl(name: *mut ::c_int, + namelen: ::c_int, + oldp: *mut ::c_void, + oldlenp: *mut ::size_t, + newp: *mut ::c_void, + newlen: ::size_t) + -> ::c_int; + pub fn ioctl(fd: ::c_int, request: ::c_ulong, ...) -> ::c_int; + pub fn backtrace(buf: *mut *mut ::c_void, + sz: ::c_int) -> ::c_int; + pub fn glob64(pattern: *const ::c_char, + flags: ::c_int, + errfunc: ::dox::Option ::c_int>, + pglob: *mut glob64_t) -> ::c_int; + pub fn globfree64(pglob: *mut glob64_t); + pub fn ptrace(request: ::c_uint, ...) 
-> ::c_long; + pub fn pthread_attr_getaffinity_np(attr: *const ::pthread_attr_t, + cpusetsize: ::size_t, + cpuset: *mut ::cpu_set_t) -> ::c_int; + pub fn pthread_attr_setaffinity_np(attr: *mut ::pthread_attr_t, + cpusetsize: ::size_t, + cpuset: *const ::cpu_set_t) -> ::c_int; + pub fn getpriority(which: ::__priority_which_t, who: ::id_t) -> ::c_int; + pub fn setpriority(which: ::__priority_which_t, who: ::id_t, + prio: ::c_int) -> ::c_int; + pub fn pthread_getaffinity_np(thread: ::pthread_t, + cpusetsize: ::size_t, + cpuset: *mut ::cpu_set_t) -> ::c_int; + pub fn pthread_setaffinity_np(thread: ::pthread_t, + cpusetsize: ::size_t, + cpuset: *const ::cpu_set_t) -> ::c_int; + pub fn sched_getcpu() -> ::c_int; + pub fn getcontext(ucp: *mut ucontext_t) -> ::c_int; + pub fn setcontext(ucp: *const ucontext_t) -> ::c_int; + pub fn makecontext(ucp: *mut ucontext_t, + func: extern fn (), + argc: ::c_int, ...); + pub fn swapcontext(uocp: *mut ucontext_t, + ucp: *const ucontext_t) -> ::c_int; +} diff --git a/src/liblibc/src/unix/notbsd/mod.rs b/src/liblibc/src/unix/notbsd/mod.rs index d625f1b721..a53211a2b8 100644 --- a/src/liblibc/src/unix/notbsd/mod.rs +++ b/src/liblibc/src/unix/notbsd/mod.rs @@ -6,6 +6,7 @@ pub type speed_t = ::c_uint; pub type tcflag_t = ::c_uint; pub type loff_t = ::c_longlong; pub type clockid_t = ::c_int; +pub type key_t = ::c_int; pub type id_t = ::c_uint; pub enum timezone {} @@ -219,6 +220,7 @@ pub const CLOCK_BOOTTIME_ALARM: clockid_t = 9; // 2014.) See also musl/mod.rs // pub const CLOCK_SGI_CYCLE: clockid_t = 10; // pub const CLOCK_TAI: clockid_t = 11; +pub const TIMER_ABSTIME: ::c_int = 1; pub const RLIMIT_CPU: ::c_int = 0; pub const RLIMIT_FSIZE: ::c_int = 1; @@ -307,9 +309,6 @@ pub const MAP_FIXED: ::c_int = 0x0010; pub const MAP_FAILED: *mut ::c_void = !0 as *mut ::c_void; -pub const MCL_CURRENT: ::c_int = 0x0001; -pub const MCL_FUTURE: ::c_int = 0x0002; - // MS_ flags for msync(2) pub const MS_ASYNC: ::c_int = 0x0001; pub const MS_INVALIDATE: ::c_int = 0x0002; @@ -416,11 +415,27 @@ pub const MADV_MERGEABLE: ::c_int = 12; pub const MADV_UNMERGEABLE: ::c_int = 13; pub const MADV_HWPOISON: ::c_int = 100; +pub const IFF_UP: ::c_int = 0x1; +pub const IFF_BROADCAST: ::c_int = 0x2; +pub const IFF_DEBUG: ::c_int = 0x4; pub const IFF_LOOPBACK: ::c_int = 0x8; +pub const IFF_POINTOPOINT: ::c_int = 0x10; +pub const IFF_NOTRAILERS: ::c_int = 0x20; +pub const IFF_RUNNING: ::c_int = 0x40; +pub const IFF_NOARP: ::c_int = 0x80; +pub const IFF_PROMISC: ::c_int = 0x100; +pub const IFF_ALLMULTI: ::c_int = 0x200; +pub const IFF_MASTER: ::c_int = 0x400; +pub const IFF_SLAVE: ::c_int = 0x800; +pub const IFF_MULTICAST: ::c_int = 0x1000; +pub const IFF_PORTSEL: ::c_int = 0x2000; +pub const IFF_AUTOMEDIA: ::c_int = 0x4000; +pub const IFF_DYNAMIC: ::c_int = 0x8000; pub const AF_UNIX: ::c_int = 1; pub const AF_INET: ::c_int = 2; pub const AF_INET6: ::c_int = 10; +pub const AF_NETLINK: ::c_int = 16; pub const SOCK_RAW: ::c_int = 3; pub const IPPROTO_TCP: ::c_int = 6; pub const IPPROTO_IP: ::c_int = 0; @@ -454,6 +469,8 @@ pub const IPV6_V6ONLY: ::c_int = 26; pub const SO_DEBUG: ::c_int = 1; +pub const MSG_NOSIGNAL: ::c_int = 0x4000; + pub const SHUT_RD: ::c_int = 0; pub const SHUT_WR: ::c_int = 1; pub const SHUT_RDWR: ::c_int = 2; @@ -463,8 +480,6 @@ pub const LOCK_EX: ::c_int = 2; pub const LOCK_NB: ::c_int = 4; pub const LOCK_UN: ::c_int = 8; -pub const SIGSTKSZ: ::size_t = 8192; - pub const SA_NODEFER: ::c_int = 0x40000000; pub const SA_RESETHAND: ::c_int = 0x80000000; pub const SA_RESTART: 
::c_int = 0x10000000; @@ -512,8 +527,6 @@ pub const QIF_USAGE: ::uint32_t = 10; pub const QIF_TIMES: ::uint32_t = 48; pub const QIF_ALL: ::uint32_t = 63; -pub const CBAUD: ::tcflag_t = 0o0010017; - pub const EFD_CLOEXEC: ::c_int = 0x80000; pub const MNT_FORCE: ::c_int = 0x1; @@ -534,31 +547,15 @@ pub const TCIOFLUSH: ::c_int = 2; pub const NL0: ::c_int = 0x00000000; pub const NL1: ::c_int = 0x00000100; pub const TAB0: ::c_int = 0x00000000; -pub const TAB1: ::c_int = 0x00000800; -pub const TAB2: ::c_int = 0x00001000; -pub const TAB3: ::c_int = 0x00001800; pub const CR0: ::c_int = 0x00000000; -pub const CR1: ::c_int = 0x00000200; -pub const CR2: ::c_int = 0x00000400; -pub const CR3: ::c_int = 0x00000600; pub const FF0: ::c_int = 0x00000000; -pub const FF1: ::c_int = 0x00008000; pub const BS0: ::c_int = 0x00000000; -pub const BS1: ::c_int = 0x00002000; pub const VT0: ::c_int = 0x00000000; -pub const VT1: ::c_int = 0x00004000; pub const VERASE: usize = 2; -pub const VWERASE: usize = 14; pub const VKILL: usize = 3; -pub const VREPRINT: usize = 12; pub const VINTR: usize = 0; pub const VQUIT: usize = 1; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; pub const VLNEXT: usize = 15; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; pub const IGNBRK: ::tcflag_t = 0x00000001; pub const BRKINT: ::tcflag_t = 0x00000002; pub const IGNPAR: ::tcflag_t = 0x00000004; @@ -568,35 +565,12 @@ pub const ISTRIP: ::tcflag_t = 0x00000020; pub const INLCR: ::tcflag_t = 0x00000040; pub const IGNCR: ::tcflag_t = 0x00000080; pub const ICRNL: ::tcflag_t = 0x00000100; -pub const IXON: ::tcflag_t = 0x00000400; -pub const IXOFF: ::tcflag_t = 0x00001000; pub const IXANY: ::tcflag_t = 0x00000800; pub const IMAXBEL: ::tcflag_t = 0x00002000; pub const OPOST: ::tcflag_t = 0x1; -pub const ONLCR: ::tcflag_t = 0x4; -pub const CSIZE: ::tcflag_t = 0x00000030; pub const CS5: ::tcflag_t = 0x00000000; -pub const CS6: ::tcflag_t = 0x00000010; -pub const CS7: ::tcflag_t = 0x00000020; -pub const CS8: ::tcflag_t = 0x00000030; -pub const CSTOPB: ::tcflag_t = 0x00000040; -pub const CREAD: ::tcflag_t = 0x00000080; -pub const PARENB: ::tcflag_t = 0x00000100; -pub const PARODD: ::tcflag_t = 0x00000200; -pub const HUPCL: ::tcflag_t = 0x00000400; -pub const CLOCAL: ::tcflag_t = 0x00000800; pub const CRTSCTS: ::tcflag_t = 0x80000000; -pub const ECHOKE: ::tcflag_t = 0x00000800; -pub const ECHOE: ::tcflag_t = 0x00000010; -pub const ECHOK: ::tcflag_t = 0x00000020; pub const ECHO: ::tcflag_t = 0x00000008; -pub const ECHONL: ::tcflag_t = 0x00000040; -pub const ECHOPRT: ::tcflag_t = 0x00000400; -pub const ECHOCTL: ::tcflag_t = 0x00000200; -pub const ISIG: ::tcflag_t = 0x00000001; -pub const ICANON: ::tcflag_t = 0x00000002; -pub const PENDIN: ::tcflag_t = 0x00004000; -pub const NOFLSH: ::tcflag_t = 0x00000080; pub const CLONE_VM: ::c_int = 0x100; pub const CLONE_FS: ::c_int = 0x200; @@ -643,8 +617,6 @@ pub const POSIX_FADV_NORMAL: ::c_int = 0; pub const POSIX_FADV_RANDOM: ::c_int = 1; pub const POSIX_FADV_SEQUENTIAL: ::c_int = 2; pub const POSIX_FADV_WILLNEED: ::c_int = 3; -pub const POSIX_FADV_DONTNEED: ::c_int = 4; -pub const POSIX_FADV_NOREUSE: ::c_int = 5; pub const AT_FDCWD: ::c_int = -100; pub const AT_SYMLINK_NOFOLLOW: ::c_int = 0x100; @@ -715,6 +687,11 @@ f! 
{ } extern { + pub fn getpwnam_r(name: *const ::c_char, + pwd: *mut passwd, + buf: *mut ::c_char, + buflen: ::size_t, + result: *mut *mut passwd) -> ::c_int; pub fn getpwuid_r(uid: ::uid_t, pwd: *mut passwd, buf: *mut ::c_char, @@ -725,6 +702,10 @@ extern { vec: *mut ::c_uchar) -> ::c_int; pub fn clock_getres(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int; pub fn clock_gettime(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int; + pub fn clock_nanosleep(clk_id: clockid_t, + flags: ::c_int, + rqtp: *const ::timespec, + rmtp: *mut ::timespec) -> ::c_int; pub fn prctl(option: ::c_int, ...) -> ::c_int; pub fn pthread_getattr_np(native: ::pthread_t, attr: *mut ::pthread_attr_t) -> ::c_int; @@ -856,6 +837,20 @@ extern { clock_id: *mut clockid_t) -> ::c_int; pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, clock_id: clockid_t) -> ::c_int; + pub fn sched_getaffinity(pid: ::pid_t, + cpusetsize: ::size_t, + cpuset: *mut cpu_set_t) -> ::c_int; + pub fn sched_setaffinity(pid: ::pid_t, + cpusetsize: ::size_t, + cpuset: *const cpu_set_t) -> ::c_int; + pub fn unshare(flags: ::c_int) -> ::c_int; + pub fn setns(fd: ::c_int, nstype: ::c_int) -> ::c_int; + pub fn sem_timedwait(sem: *mut sem_t, + abstime: *const ::timespec) -> ::c_int; + pub fn accept4(fd: ::c_int, addr: *mut ::sockaddr, len: *mut ::socklen_t, + flg: ::c_int) -> ::c_int; + pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, + abstime: *const ::timespec) -> ::c_int; } cfg_if! { diff --git a/src/liblibc/src/unix/solaris/mod.rs b/src/liblibc/src/unix/solaris/mod.rs index 5a31e5269b..63cd1249a4 100644 --- a/src/liblibc/src/unix/solaris/mod.rs +++ b/src/liblibc/src/unix/solaris/mod.rs @@ -432,6 +432,7 @@ pub const FIONBIO: ::c_int = 0x8004667e; pub const SIGCHLD: ::c_int = 18; pub const SIGBUS: ::c_int = 10; +pub const SIGINFO: ::c_int = 41; pub const SIG_BLOCK: ::c_int = 1; pub const SIG_UNBLOCK: ::c_int = 2; pub const SIG_SETMASK: ::c_int = 3; @@ -541,6 +542,7 @@ pub const SIGXCPU: ::c_int = 30; pub const SIGXFSZ: ::c_int = 31; pub const WNOHANG: ::c_int = 0x40; +pub const WUNTRACED: ::c_int = 0x04; pub const PROT_NONE: ::c_int = 0; pub const PROT_READ: ::c_int = 1; @@ -649,6 +651,8 @@ pub const EMULTIHOP: ::c_int = 74; pub const ENOLINK: ::c_int = 67; pub const EPROTO: ::c_int = 71; +pub const EAI_SYSTEM: ::c_int = 11; + pub const F_DUPFD: ::c_int = 0; pub const F_GETFD: ::c_int = 1; pub const F_SETFD: ::c_int = 2; @@ -724,6 +728,8 @@ pub const SIGSTKSZ: ::size_t = 8192; // __CLOCK_REALTIME0==0 is an obsoleted version of CLOCK_REALTIME==3 pub const CLOCK_REALTIME: clockid_t = 3; pub const CLOCK_MONOTONIC: clockid_t = 4; +pub const TIMER_RELTIME: ::c_int = 0; +pub const TIMER_ABSTIME: ::c_int = 1; pub const RLIMIT_CPU: ::c_int = 0; pub const RLIMIT_FSIZE: ::c_int = 1; @@ -949,6 +955,9 @@ f! 
{ } extern { + pub fn getifaddrs(ifap: *mut *mut ::ifaddrs) -> ::c_int; + pub fn freeifaddrs(ifa: *mut ::ifaddrs); + pub fn stack_getbounds(sp: *mut ::stack_t) -> ::c_int; pub fn mincore(addr: *const ::c_void, len: ::size_t, vec: *mut c_char) -> ::c_int; @@ -959,6 +968,10 @@ extern { -> ::c_int; pub fn clock_getres(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int; pub fn clock_gettime(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int; + pub fn clock_nanosleep(clk_id: clockid_t, + flags: ::c_int, + rqtp: *const ::timespec, + rmtp: *mut ::timespec) -> ::c_int; pub fn getnameinfo(sa: *const ::sockaddr, salen: ::socklen_t, host: *mut ::c_char, @@ -966,10 +979,16 @@ extern { serv: *mut ::c_char, sevlen: ::socklen_t, flags: ::c_int) -> ::c_int; + pub fn getpwnam_r(name: *const ::c_char, + pwd: *mut passwd, + buf: *mut ::c_char, + buflen: ::c_int) -> *const passwd; pub fn getpwuid_r(uid: ::uid_t, pwd: *mut passwd, buf: *mut ::c_char, - buflen: ::size_t) -> *const passwd; + buflen: ::c_int) -> *const passwd; + pub fn setpwent(); + pub fn getpwent() -> *mut passwd; pub fn readdir(dirp: *mut ::DIR) -> *const ::dirent; pub fn fdatasync(fd: ::c_int) -> ::c_int; pub fn nl_langinfo_l(item: ::nl_item, locale: ::locale_t) -> *mut ::c_char; @@ -1021,5 +1040,8 @@ extern { clock_id: *mut clockid_t) -> ::c_int; pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, clock_id: clockid_t) -> ::c_int; + pub fn sem_timedwait(sem: *mut sem_t, + abstime: *const ::timespec) -> ::c_int; + pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, + abstime: *const ::timespec) -> ::c_int; } - diff --git a/src/liblibc/src/windows.rs b/src/liblibc/src/windows.rs index 5ba8b33451..b916fd4bd6 100644 --- a/src/liblibc/src/windows.rs +++ b/src/liblibc/src/windows.rs @@ -105,6 +105,47 @@ pub const LC_MONETARY: ::c_int = 3; pub const LC_NUMERIC: ::c_int = 4; pub const LC_TIME: ::c_int = 5; +pub const EPERM: ::c_int = 1; +pub const ENOENT: ::c_int = 2; +pub const ESRCH: ::c_int = 3; +pub const EINTR: ::c_int = 4; +pub const EIO: ::c_int = 5; +pub const ENXIO: ::c_int = 6; +pub const E2BIG: ::c_int = 7; +pub const ENOEXEC: ::c_int = 8; +pub const EBADF: ::c_int = 9; +pub const ECHILD: ::c_int = 10; +pub const EAGAIN: ::c_int = 11; +pub const ENOMEM: ::c_int = 12; +pub const EACCES: ::c_int = 13; +pub const EFAULT: ::c_int = 14; +pub const EBUSY: ::c_int = 16; +pub const EEXIST: ::c_int = 17; +pub const EXDEV: ::c_int = 18; +pub const ENODEV: ::c_int = 19; +pub const ENOTDIR: ::c_int = 20; +pub const EISDIR: ::c_int = 21; +pub const EINVAL: ::c_int = 22; +pub const ENFILE: ::c_int = 23; +pub const EMFILE: ::c_int = 24; +pub const ENOTTY: ::c_int = 25; +pub const EFBIG: ::c_int = 27; +pub const ENOSPC: ::c_int = 28; +pub const ESPIPE: ::c_int = 29; +pub const EROFS: ::c_int = 30; +pub const EMLINK: ::c_int = 31; +pub const EPIPE: ::c_int = 32; +pub const EDOM: ::c_int = 33; +pub const ERANGE: ::c_int = 34; +pub const EDEADLK: ::c_int = 36; +pub const EDEADLOCK: ::c_int = 36; +pub const ENAMETOOLONG: ::c_int = 38; +pub const ENOLCK: ::c_int = 39; +pub const ENOSYS: ::c_int = 40; +pub const ENOTEMPTY: ::c_int = 41; +pub const EILSEQ: ::c_int = 42; +pub const STRUNCATE: ::c_int = 80; + #[cfg(target_env = "msvc")] // " if " -- appease style checker #[link(name = "msvcrt")] extern {} diff --git a/src/libpanic_unwind/gcc.rs b/src/libpanic_unwind/gcc.rs index fdae8f69a9..33b24fbaa2 100644 --- a/src/libpanic_unwind/gcc.rs +++ b/src/libpanic_unwind/gcc.rs @@ -124,12 +124,15 @@ const UNWIND_DATA_REG: (i32, i32) = (0, 1); // RAX, 
RDX #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] const UNWIND_DATA_REG: (i32, i32) = (0, 1); // R0, R1 / X0, X1 -#[cfg(any(target_arch = "mips", target_arch = "mipsel"))] +#[cfg(any(target_arch = "mips", target_arch = "mipsel", target_arch = "mips64"))] const UNWIND_DATA_REG: (i32, i32) = (4, 5); // A0, A1 #[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))] const UNWIND_DATA_REG: (i32, i32) = (3, 4); // R3, R4 / X3, X4 +#[cfg(target_arch = "s390x")] +const UNWIND_DATA_REG: (i32, i32) = (6, 7); // R6, R7 + // The following code is based on GCC's C and C++ personality routines. For reference, see: // https://github.com/gcc-mirror/gcc/blob/master/libstdc++-v3/libsupc++/eh_personality.cc // https://github.com/gcc-mirror/gcc/blob/trunk/libgcc/unwind-c.c @@ -264,30 +267,6 @@ unsafe fn find_eh_action(context: *mut uw::_Unwind_Context) -> EHAction { eh::find_eh_action(lsda, &eh_context) } -// *** Delete after a new snapshot *** -#[cfg(all(stage0, any(target_os = "ios", not(target_arch = "arm"))))] -#[lang = "eh_personality_catch"] -#[no_mangle] -pub unsafe extern "C" fn rust_eh_personality_catch(version: c_int, - actions: uw::_Unwind_Action, - exception_class: uw::_Unwind_Exception_Class, - ue_header: *mut uw::_Unwind_Exception, - context: *mut uw::_Unwind_Context) - -> uw::_Unwind_Reason_Code { - rust_eh_personality(version, actions, exception_class, ue_header, context) -} - -// *** Delete after a new snapshot *** -#[cfg(all(stage0, target_arch = "arm", not(target_os = "ios")))] -#[lang = "eh_personality_catch"] -#[no_mangle] -pub unsafe extern "C" fn rust_eh_personality_catch(state: uw::_Unwind_State, - ue_header: *mut uw::_Unwind_Exception, - context: *mut uw::_Unwind_Context) - -> uw::_Unwind_Reason_Code { - rust_eh_personality(state, ue_header, context) -} - // See docs in the `unwind` module. #[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))] #[lang = "eh_unwind_resume"] diff --git a/src/libpanic_unwind/seh.rs b/src/libpanic_unwind/seh.rs index dd6e92fe9a..5896421493 100644 --- a/src/libpanic_unwind/seh.rs +++ b/src/libpanic_unwind/seh.rs @@ -232,13 +232,13 @@ extern "C" { // Again, I'm not entirely sure what this is describing, it just seems to work. #[cfg_attr(not(test), lang = "msvc_try_filter")] static mut TYPE_DESCRIPTOR1: _TypeDescriptor = _TypeDescriptor { - pVFTable: &TYPE_INFO_VTABLE as *const _ as *const _, + pVFTable: unsafe { &TYPE_INFO_VTABLE } as *const _ as *const _, spare: 0 as *mut _, name: imp::NAME1, }; static mut TYPE_DESCRIPTOR2: _TypeDescriptor = _TypeDescriptor { - pVFTable: &TYPE_INFO_VTABLE as *const _ as *const _, + pVFTable: unsafe { &TYPE_INFO_VTABLE } as *const _ as *const _, spare: 0 as *mut _, name: imp::NAME2, }; diff --git a/src/libpanic_unwind/seh64_gnu.rs b/src/libpanic_unwind/seh64_gnu.rs index 3642e24889..e6d3920b29 100644 --- a/src/libpanic_unwind/seh64_gnu.rs +++ b/src/libpanic_unwind/seh64_gnu.rs @@ -81,21 +81,6 @@ pub unsafe fn cleanup(ptr: *mut u8) -> Box { // This is considered acceptable, because the behavior of throwing exceptions // through a C ABI boundary is undefined. 
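The comment above notes that throwing an exception across a C ABI boundary is undefined behavior. As a hedged illustration only (not part of this patch; the `rust_callback` name is made up), code exposed to C typically keeps a panic from unwinding into its caller by wrapping the body in `std::panic::catch_unwind`:

```rust
// Hypothetical callback handed to C code; illustrative sketch, not patch content.
use std::panic;

#[no_mangle]
pub extern "C" fn rust_callback(x: i32) -> i32 {
    // Catch any panic before it can unwind into the C caller,
    // since unwinding through a C ABI frame is undefined behavior.
    panic::catch_unwind(|| {
        assert!(x >= 0, "negative input");
        x * 2
    })
    .unwrap_or(-1) // report failure through the return value instead of unwinding
}
```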
-// *** Delete after a new snapshot *** -#[cfg(stage0)] -#[lang = "eh_personality_catch"] -#[cfg(not(test))] -unsafe extern "C" fn rust_eh_personality_catch(exceptionRecord: *mut c::EXCEPTION_RECORD, - establisherFrame: c::LPVOID, - contextRecord: *mut c::CONTEXT, - dispatcherContext: *mut c::DISPATCHER_CONTEXT) - -> c::EXCEPTION_DISPOSITION { - rust_eh_personality(exceptionRecord, - establisherFrame, - contextRecord, - dispatcherContext) -} - #[lang = "eh_personality"] #[cfg(not(test))] unsafe extern "C" fn rust_eh_personality(exceptionRecord: *mut c::EXCEPTION_RECORD, diff --git a/src/libproc_macro/Cargo.toml b/src/libproc_macro/Cargo.toml new file mode 100644 index 0000000000..99fb1d65cd --- /dev/null +++ b/src/libproc_macro/Cargo.toml @@ -0,0 +1,15 @@ +[package] +authors = ["The Rust Project Developers"] +name = "proc_macro" +version = "0.0.0" + +[lib] +name = "proc_macro" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +log = { path = "../liblog" } +rustc_plugin = { path = "../librustc_plugin" } +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/libproc_macro/build.rs b/src/libproc_macro/build.rs new file mode 100644 index 0000000000..7b7590b863 --- /dev/null +++ b/src/libproc_macro/build.rs @@ -0,0 +1,89 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +extern crate syntax; +extern crate syntax_pos; + +use syntax::ast::Ident; +use syntax::codemap::DUMMY_SP; +use syntax::parse::token::{self, Token, keywords, str_to_ident}; +use syntax::tokenstream::{self, TokenTree, TokenStream}; +use std::rc::Rc; + +/// A wrapper around `TokenStream::concat` to avoid extra namespace specification and +/// provide TokenStream concatenation as a generic operator. +pub fn concat(ts1: TokenStream, ts2: TokenStream) -> TokenStream { + TokenStream::concat(ts1, ts2) +} + +/// Checks if two identifiers have the same name, disregarding context. This allows us to +/// fake 'reserved' keywords. +// FIXME We really want `free-identifier-=?` (a la Dybvig 1993). von Tander 2007 is +// probably the easiest way to do that. +pub fn ident_eq(tident: &TokenTree, id: Ident) -> bool { + let tid = match *tident { + TokenTree::Token(_, Token::Ident(ref id)) => id, + _ => { + return false; + } + }; + + tid.name == id.name +} + +// ____________________________________________________________________________________________ +// Conversion operators + +/// Convert a `&str` into a Token. +pub fn str_to_token_ident(s: &str) -> Token { + Token::Ident(str_to_ident(s)) +} + +/// Converts a keyword (from `syntax::parse::token::keywords`) into a Token that +/// corresponds to it. +pub fn keyword_to_token_ident(kw: keywords::Keyword) -> Token { + Token::Ident(str_to_ident(&kw.name().as_str()[..])) +} + +// ____________________________________________________________________________________________ +// Build Procedures + +/// Generically takes a `ts` and delimiter and returns `ts` delimited by the specified +/// delimiter. 
+pub fn build_delimited(ts: TokenStream, delim: token::DelimToken) -> TokenStream { + let tts = ts.to_tts(); + TokenStream::from_tts(vec![TokenTree::Delimited(DUMMY_SP, + Rc::new(tokenstream::Delimited { + delim: delim, + open_span: DUMMY_SP, + tts: tts, + close_span: DUMMY_SP, + }))]) +} + +/// Takes `ts` and returns `[ts]`. +pub fn build_bracket_delimited(ts: TokenStream) -> TokenStream { + build_delimited(ts, token::DelimToken::Bracket) +} + +/// Takes `ts` and returns `{ts}`. +pub fn build_brace_delimited(ts: TokenStream) -> TokenStream { + build_delimited(ts, token::DelimToken::Brace) +} + +/// Takes `ts` and returns `(ts)`. +pub fn build_paren_delimited(ts: TokenStream) -> TokenStream { + build_delimited(ts, token::DelimToken::Paren) +} + +/// Constructs `()`. +pub fn build_empty_args() -> TokenStream { + build_paren_delimited(TokenStream::mk_empty()) +} diff --git a/src/libproc_macro/lib.rs b/src/libproc_macro/lib.rs new file mode 100644 index 0000000000..9e25cb88e0 --- /dev/null +++ b/src/libproc_macro/lib.rs @@ -0,0 +1,137 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! # Proc_Macro +//! +//! A library for procedural macro writers. +//! +//! ## Usage +//! This package provides the `qquote!` macro for syntax creation, and the prelude +//! (at libproc_macro::prelude) provides a number of operations: +//! - `concat`, for concatenating two TokenStreams. +//! - `ident_eq`, for checking if two identifiers are equal regardless of syntax context. +//! - `str_to_token_ident`, for converting an `&str` into a Token. +//! - `keyword_to_token_delim`, for converting a `parse::token::keywords::Keyword` into a +//! Token. +//! - `build_delimited`, for creating a new TokenStream from an existing one and a delimiter +//! by wrapping the TokenStream in the delimiter. +//! - `build_bracket_delimited`, `build_brace_delimited`, and `build_paren_delimited`, for +//! easing the above. +//! - `build_empty_args`, which returns a TokenStream containing `()`. +//! - `lex`, which takes an `&str` and returns the TokenStream it represents. +//! +//! The `qquote!` macro also imports `syntax::ext::proc_macro_shim::prelude::*`, so you +//! will need to `extern crate syntax` for usage. (This is a temporary solution until more +//! of the external API in libproc_macro is stabilized to support the token construction +//! operations that the qausiquoter relies on.) The shim file also provides additional +//! operations, such as `build_block_emitter` (as used in the `cond` example below). +//! +//! ## TokenStreams +//! +//! TokenStreams serve as the basis of the macro system. They are, in essence, vectors of +//! TokenTrees, where indexing treats delimited values as a single term. That is, the term +//! `even(a+c) && even(b)` will be indexibly encoded as `even | (a+c) | even | (b)` where, +//! in reality, `(a+c)` is actually a decorated pointer to `a | + | c`. +//! +//! If a user has a TokenStream that is a single, delimited value, they can use +//! `maybe_delimited` to destruct it and receive the internal vector as a new TokenStream +//! as: +//! ``` +//! `(a+c)`.maybe_delimited() ~> Some(a | + | c)` +//! ``` +//! +//! 
Check the TokenStream documentation for more information; the structure also provides +//! cheap concatenation and slicing. +//! +//! ## Quasiquotation +//! +//! The quasiquoter creates output that, when run, constructs the tokenstream specified as +//! input. For example, `qquote!(5 + 5)` will produce a program, that, when run, will +//! construct the TokenStream `5 | + | 5`. +//! +//! ### Unquoting +//! +//! Unquoting is currently done as `unquote`, and works by taking the single next +//! TokenTree in the TokenStream as the unquoted term. Ergonomically, `unquote(foo)` works +//! fine, but `unquote foo` is also supported. +//! +//! A simple example might be: +//! +//!``` +//!fn double(tmp: TokenStream) -> TokenStream { +//! qquote!(unquote(tmp) * 2) +//!} +//!``` +//! +//! ### Large Example: Implementing Scheme's `cond` +//! +//! Below is the full implementation of Scheme's `cond` operator. +//! +//! ``` +//! fn cond_rec(input: TokenStream) -> TokenStream { +//! if input.is_empty() { return quote!(); } +//! +//! let next = input.slice(0..1); +//! let rest = input.slice_from(1..); +//! +//! let clause : TokenStream = match next.maybe_delimited() { +//! Some(ts) => ts, +//! _ => panic!("Invalid input"), +//! }; +//! +//! // clause is ([test]) [rhs] +//! if clause.len() < 2 { panic!("Invalid macro usage in cond: {:?}", clause) } +//! +//! let test: TokenStream = clause.slice(0..1); +//! let rhs: TokenStream = clause.slice_from(1..); +//! +//! if ident_eq(&test[0], str_to_ident("else")) || rest.is_empty() { +//! quote!({unquote(rhs)}) +//! } else { +//! quote!({if unquote(test) { unquote(rhs) } else { cond!(unquote(rest)) } }) +//! } +//! } +//! ``` +//! + +#![crate_name = "proc_macro"] +#![unstable(feature = "rustc_private", issue = "27812")] +#![feature(plugin_registrar)] +#![crate_type = "dylib"] +#![crate_type = "rlib"] +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] + +#![feature(staged_api)] +#![feature(rustc_diagnostic_macros)] +#![feature(rustc_private)] + +extern crate rustc_plugin; +extern crate syntax; +extern crate syntax_pos; +#[macro_use] extern crate log; + +mod qquote; +pub mod build; +pub mod parse; +pub mod prelude; +use qquote::qquote; + +use rustc_plugin::Registry; + +// ____________________________________________________________________________________________ +// Main macro definition + +#[plugin_registrar] +pub fn plugin_registrar(reg: &mut Registry) { + reg.register_macro("qquote", qquote); +} diff --git a/src/libproc_macro/parse.rs b/src/libproc_macro/parse.rs new file mode 100644 index 0000000000..9af8a68cdc --- /dev/null +++ b/src/libproc_macro/parse.rs @@ -0,0 +1,26 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Parsing utilities for writing procedural macros. + +extern crate syntax; + +use syntax::parse::{ParseSess, filemap_to_tts}; +use syntax::tokenstream::TokenStream; + +/// Map a string to tts, using a made-up filename. For example, `lex(15)` will return a +/// TokenStream containing the literal 15. 
+pub fn lex(source_str: &str) -> TokenStream { + let ps = ParseSess::new(); + TokenStream::from_tts(filemap_to_tts(&ps, + ps.codemap().new_filemap("procmacro_lex".to_string(), + None, + source_str.to_owned()))) +} diff --git a/src/libproc_macro/prelude.rs b/src/libproc_macro/prelude.rs new file mode 100644 index 0000000000..4c0c8ba6c6 --- /dev/null +++ b/src/libproc_macro/prelude.rs @@ -0,0 +1,12 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub use build::*; +pub use parse::*; diff --git a/src/libproc_macro/qquote.rs b/src/libproc_macro/qquote.rs new file mode 100644 index 0000000000..67d0c77b00 --- /dev/null +++ b/src/libproc_macro/qquote.rs @@ -0,0 +1,470 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! # Quasiquoter +//! This file contains the implementation internals of the quasiquoter provided by `quote!`. +//! +//! ## Ouput +//! The quasiquoter produces output of the form: +//! let tmp0 = ...; +//! let tmp1 = ...; +//! ... +//! concat(from_tokens(...), concat(...)) +//! +//! To the more explicit, the quasiquoter produces a series of bindings that each +//! construct TokenStreams via constructing Tokens and using `from_tokens`, ultimately +//! invoking `concat` on these bindings (and inlined expressions) to construct a +//! TokenStream that resembles the output syntax. +//! + +extern crate rustc_plugin; +extern crate syntax; +extern crate syntax_pos; + +use build::*; +use parse::lex; +use qquote::int_build::*; + +use syntax::ast::Ident; +use syntax::codemap::Span; +use syntax::ext::base::*; +use syntax::ext::base; +use syntax::ext::proc_macro_shim::build_block_emitter; +use syntax::parse::token::{self, Token, gensym_ident, str_to_ident}; +use syntax::print::pprust; +use syntax::tokenstream::{TokenTree, TokenStream}; + +// ____________________________________________________________________________________________ +// Main definition +/// The user should use the macro, not this procedure. 
+pub fn qquote<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[TokenTree]) + -> Box { + + debug!("\nTTs in: {:?}\n", pprust::tts_to_string(&tts[..])); + let output = qquoter(cx, TokenStream::from_tts(tts.clone().to_owned())); + debug!("\nQQ out: {}\n", pprust::tts_to_string(&output.to_tts()[..])); + let imports = concat(lex("use syntax::ext::proc_macro_shim::prelude::*;"), + lex("use proc_macro::prelude::*;")); + build_block_emitter(cx, sp, build_brace_delimited(concat(imports, output))) +} + +// ____________________________________________________________________________________________ +// Datatype Definitions + +#[derive(Debug)] +struct QDelimited { + delim: token::DelimToken, + open_span: Span, + tts: Vec, + close_span: Span, +} + +#[derive(Debug)] +enum QTT { + TT(TokenTree), + QDL(QDelimited), + QIdent(TokenTree), +} + +type Bindings = Vec<(Ident, TokenStream)>; + +// ____________________________________________________________________________________________ +// Quasiquoter Algorithm +// This algorithm works as follows: +// Input: TokenStream +// 1. Walk the TokenStream, gathering up the unquoted expressions and marking them separately. +// 2. Hoist any unquoted term into its own let-binding via a gensym'd identifier +// 3. Convert the body from a `complex expression` into a simplified one via `convert_complex_tts +// 4. Stitch everything together with `concat`. +fn qquoter<'cx>(cx: &'cx mut ExtCtxt, ts: TokenStream) -> TokenStream { + if ts.is_empty() { + return lex("TokenStream::mk_empty()"); + } + let qq_res = qquote_iter(cx, 0, ts); + let mut bindings = qq_res.0; + let body = qq_res.1; + let mut cct_res = convert_complex_tts(cx, body); + + bindings.append(&mut cct_res.0); + + if bindings.is_empty() { + cct_res.1 + } else { + debug!("BINDINGS"); + for b in bindings.clone() { + debug!("{:?} = {}", b.0, pprust::tts_to_string(&b.1.to_tts()[..])); + } + TokenStream::concat(unravel(bindings), cct_res.1) + } +} + +fn qquote_iter<'cx>(cx: &'cx mut ExtCtxt, depth: i64, ts: TokenStream) -> (Bindings, Vec) { + let mut depth = depth; + let mut bindings: Bindings = Vec::new(); + let mut output: Vec = Vec::new(); + + let mut iter = ts.iter(); + + loop { + let next = iter.next(); + if next.is_none() { + break; + } + let next = next.unwrap().clone(); + match next { + TokenTree::Token(_, Token::Ident(id)) if is_unquote(id) => { + if depth == 0 { + let exp = iter.next(); + if exp.is_none() { + break; + } // produce an error or something first + let exp = vec![exp.unwrap().to_owned()]; + debug!("RHS: {:?}", exp.clone()); + let new_id = gensym_ident("tmp"); + debug!("RHS TS: {:?}", TokenStream::from_tts(exp.clone())); + debug!("RHS TS TT: {:?}", TokenStream::from_tts(exp.clone()).to_vec()); + bindings.push((new_id, TokenStream::from_tts(exp))); + debug!("BINDINGS"); + for b in bindings.clone() { + debug!("{:?} = {}", b.0, pprust::tts_to_string(&b.1.to_tts()[..])); + } + output.push(QTT::QIdent(as_tt(Token::Ident(new_id.clone())))); + } else { + depth = depth - 1; + output.push(QTT::TT(next.clone())); + } + } + TokenTree::Token(_, Token::Ident(id)) if is_qquote(id) => { + depth = depth + 1; + } + TokenTree::Delimited(_, ref dl) => { + let br = qquote_iter(cx, depth, TokenStream::from_tts(dl.tts.clone().to_owned())); + let mut bind_ = br.0; + let res_ = br.1; + bindings.append(&mut bind_); + + let new_dl = QDelimited { + delim: dl.delim, + open_span: dl.open_span, + tts: res_, + close_span: dl.close_span, + }; + + output.push(QTT::QDL(new_dl)); + } + t => { + output.push(QTT::TT(t)); + } + } + } + + 
(bindings, output) +} + +// ____________________________________________________________________________________________ +// Turns QQTs into a TokenStream and some Bindings. +/// Construct a chain of concatenations. +fn unravel_concats(tss: Vec) -> TokenStream { + let mut pushes: Vec = + tss.into_iter().filter(|&ref ts| !ts.is_empty()).collect(); + let mut output = match pushes.pop() { + Some(ts) => ts, + None => { + return TokenStream::mk_empty(); + } + }; + + while let Some(ts) = pushes.pop() { + output = build_fn_call(str_to_ident("concat"), + concat(concat(ts, + from_tokens(vec![Token::Comma])), + output)); + } + output +} + +/// This converts the vector of QTTs into a seet of Bindings for construction and the main +/// body as a TokenStream. +fn convert_complex_tts<'cx>(cx: &'cx mut ExtCtxt, tts: Vec) -> (Bindings, TokenStream) { + let mut pushes: Vec = Vec::new(); + let mut bindings: Bindings = Vec::new(); + + let mut iter = tts.into_iter(); + + loop { + let next = iter.next(); + if next.is_none() { + break; + } + let next = next.unwrap(); + match next { + QTT::TT(TokenTree::Token(_, t)) => { + let token_out = emit_token(t); + pushes.push(token_out); + } + // FIXME handle sequence repetition tokens + QTT::QDL(qdl) => { + debug!(" QDL: {:?} ", qdl.tts); + let new_id = gensym_ident("qdl_tmp"); + let mut cct_rec = convert_complex_tts(cx, qdl.tts); + bindings.append(&mut cct_rec.0); + bindings.push((new_id, cct_rec.1)); + + let sep = build_delim_tok(qdl.delim); + + pushes.push(build_mod_call(vec![str_to_ident("proc_macro"), + str_to_ident("build"), + str_to_ident("build_delimited")], + concat(from_tokens(vec![Token::Ident(new_id)]), + concat(lex(","), sep)))); + } + QTT::QIdent(t) => { + pushes.push(TokenStream::from_tts(vec![t])); + pushes.push(TokenStream::mk_empty()); + } + _ => panic!("Unhandled case!"), + } + + } + + (bindings, unravel_concats(pushes)) +} + +// ____________________________________________________________________________________________ +// Utilities + +/// Unravels Bindings into a TokenStream of `let` declarations. +fn unravel(binds: Bindings) -> TokenStream { + let mut output = TokenStream::mk_empty(); + + for b in binds { + output = concat(output, build_let(b.0, b.1)); + } + + output +} + +/// Checks if the Ident is `unquote`. +fn is_unquote(id: Ident) -> bool { + let qq = str_to_ident("unquote"); + id.name == qq.name // We disregard context; unquote is _reserved_ +} + +/// Checks if the Ident is `quote`. 
+fn is_qquote(id: Ident) -> bool { + let qq = str_to_ident("qquote"); + id.name == qq.name // We disregard context; qquote is _reserved_ +} + +mod int_build { + extern crate syntax; + extern crate syntax_pos; + + use parse::*; + use build::*; + + use syntax::ast::{self, Ident}; + use syntax::codemap::{DUMMY_SP}; + use syntax::parse::token::{self, Token, keywords, str_to_ident}; + use syntax::tokenstream::{TokenTree, TokenStream}; + + // ____________________________________________________________________________________________ + // Emitters + + pub fn emit_token(t: Token) -> TokenStream { + concat(lex("TokenStream::from_tokens"), + build_paren_delimited(build_vec(build_token_tt(t)))) + } + + pub fn emit_lit(l: token::Lit, n: Option) -> TokenStream { + let suf = match n { + Some(n) => format!("Some(ast::Name({}))", n.0), + None => "None".to_string(), + }; + + let lit = match l { + token::Lit::Byte(n) => format!("Lit::Byte(token::intern(\"{}\"))", n.to_string()), + token::Lit::Char(n) => format!("Lit::Char(token::intern(\"{}\"))", n.to_string()), + token::Lit::Integer(n) => format!("Lit::Integer(token::intern(\"{}\"))", n.to_string()), + token::Lit::Float(n) => format!("Lit::Float(token::intern(\"{}\"))", n.to_string()), + token::Lit::Str_(n) => format!("Lit::Str_(token::intern(\"{}\"))", n.to_string()), + token::Lit::ByteStr(n) => format!("Lit::ByteStr(token::intern(\"{}\"))", n.to_string()), + _ => panic!("Unsupported literal"), + }; + + let res = format!("Token::Literal({},{})", lit, suf); + debug!("{}", res); + lex(&res) + } + + // ____________________________________________________________________________________________ + // Token Builders + + pub fn build_binop_tok(bot: token::BinOpToken) -> TokenStream { + match bot { + token::BinOpToken::Plus => lex("Token::BinOp(BinOpToken::Plus)"), + token::BinOpToken::Minus => lex("Token::BinOp(BinOpToken::Minus)"), + token::BinOpToken::Star => lex("Token::BinOp(BinOpToken::Star)"), + token::BinOpToken::Slash => lex("Token::BinOp(BinOpToken::Slash)"), + token::BinOpToken::Percent => lex("Token::BinOp(BinOpToken::Percent)"), + token::BinOpToken::Caret => lex("Token::BinOp(BinOpToken::Caret)"), + token::BinOpToken::And => lex("Token::BinOp(BinOpToken::And)"), + token::BinOpToken::Or => lex("Token::BinOp(BinOpToken::Or)"), + token::BinOpToken::Shl => lex("Token::BinOp(BinOpToken::Shl)"), + token::BinOpToken::Shr => lex("Token::BinOp(BinOpToken::Shr)"), + } + } + + pub fn build_binopeq_tok(bot: token::BinOpToken) -> TokenStream { + match bot { + token::BinOpToken::Plus => lex("Token::BinOpEq(BinOpToken::Plus)"), + token::BinOpToken::Minus => lex("Token::BinOpEq(BinOpToken::Minus)"), + token::BinOpToken::Star => lex("Token::BinOpEq(BinOpToken::Star)"), + token::BinOpToken::Slash => lex("Token::BinOpEq(BinOpToken::Slash)"), + token::BinOpToken::Percent => lex("Token::BinOpEq(BinOpToken::Percent)"), + token::BinOpToken::Caret => lex("Token::BinOpEq(BinOpToken::Caret)"), + token::BinOpToken::And => lex("Token::BinOpEq(BinOpToken::And)"), + token::BinOpToken::Or => lex("Token::BinOpEq(BinOpToken::Or)"), + token::BinOpToken::Shl => lex("Token::BinOpEq(BinOpToken::Shl)"), + token::BinOpToken::Shr => lex("Token::BinOpEq(BinOpToken::Shr)"), + } + } + + pub fn build_delim_tok(dt: token::DelimToken) -> TokenStream { + match dt { + token::DelimToken::Paren => lex("DelimToken::Paren"), + token::DelimToken::Bracket => lex("DelimToken::Bracket"), + token::DelimToken::Brace => lex("DelimToken::Brace"), + token::DelimToken::NoDelim => lex("DelimToken::NoDelim"), + 
} + } + + pub fn build_token_tt(t: Token) -> TokenStream { + match t { + Token::Eq => lex("Token::Eq"), + Token::Lt => lex("Token::Lt"), + Token::Le => lex("Token::Le"), + Token::EqEq => lex("Token::EqEq"), + Token::Ne => lex("Token::Ne"), + Token::Ge => lex("Token::Ge"), + Token::Gt => lex("Token::Gt"), + Token::AndAnd => lex("Token::AndAnd"), + Token::OrOr => lex("Token::OrOr"), + Token::Not => lex("Token::Not"), + Token::Tilde => lex("Token::Tilde"), + Token::BinOp(tok) => build_binop_tok(tok), + Token::BinOpEq(tok) => build_binopeq_tok(tok), + Token::At => lex("Token::At"), + Token::Dot => lex("Token::Dot"), + Token::DotDot => lex("Token::DotDot"), + Token::DotDotDot => lex("Token::DotDotDot"), + Token::Comma => lex("Token::Comma"), + Token::Semi => lex("Token::Semi"), + Token::Colon => lex("Token::Colon"), + Token::ModSep => lex("Token::ModSep"), + Token::RArrow => lex("Token::RArrow"), + Token::LArrow => lex("Token::LArrow"), + Token::FatArrow => lex("Token::FatArrow"), + Token::Pound => lex("Token::Pound"), + Token::Dollar => lex("Token::Dollar"), + Token::Question => lex("Token::Question"), + Token::OpenDelim(dt) => { + match dt { + token::DelimToken::Paren => lex("Token::OpenDelim(DelimToken::Paren)"), + token::DelimToken::Bracket => lex("Token::OpenDelim(DelimToken::Bracket)"), + token::DelimToken::Brace => lex("Token::OpenDelim(DelimToken::Brace)"), + token::DelimToken::NoDelim => lex("DelimToken::NoDelim"), + } + } + Token::CloseDelim(dt) => { + match dt { + token::DelimToken::Paren => lex("Token::CloseDelim(DelimToken::Paren)"), + token::DelimToken::Bracket => lex("Token::CloseDelim(DelimToken::Bracket)"), + token::DelimToken::Brace => lex("Token::CloseDelim(DelimToken::Brace)"), + token::DelimToken::NoDelim => lex("DelimToken::NoDelim"), + } + } + Token::Underscore => lex("_"), + Token::Literal(lit, sfx) => emit_lit(lit, sfx), + // fix ident expansion information... somehow + Token::Ident(ident) => lex(&format!("Token::Ident(str_to_ident(\"{}\"))", ident.name)), + Token::Lifetime(ident) => lex(&format!("Token::Ident(str_to_ident(\"{}\"))", + ident.name)), + _ => panic!("Unhandled case!"), + } + } + + // ____________________________________________________________________________________________ + // Conversion operators + + pub fn as_tt(t: Token) -> TokenTree { + // FIXME do something nicer with the spans + TokenTree::Token(DUMMY_SP, t) + } + + // ____________________________________________________________________________________________ + // Build Procedures + + /// Takes `input` and returns `vec![input]`. + pub fn build_vec(ts: TokenStream) -> TokenStream { + build_mac_call(str_to_ident("vec"), ts) + // tts.clone().to_owned() + } + + /// Takes `ident` and `rhs` and produces `let ident = rhs;`. + pub fn build_let(id: Ident, tts: TokenStream) -> TokenStream { + concat(from_tokens(vec![keyword_to_token_ident(keywords::Let), + Token::Ident(id), + Token::Eq]), + concat(tts, from_tokens(vec![Token::Semi]))) + } + + /// Takes `ident ...`, and `args ...` and produces `ident::...(args ...)`. + pub fn build_mod_call(ids: Vec, args: TokenStream) -> TokenStream { + let call = from_tokens(intersperse(ids.into_iter().map(|id| Token::Ident(id)).collect(), + Token::ModSep)); + concat(call, build_paren_delimited(args)) + } + + /// Takes `ident` and `args ...` and produces `ident(args ...)`. 
+ pub fn build_fn_call(name: Ident, args: TokenStream) -> TokenStream { + concat(from_tokens(vec![Token::Ident(name)]), build_paren_delimited(args)) + } + + /// Takes `ident` and `args ...` and produces `ident!(args ...)`. + pub fn build_mac_call(name: Ident, args: TokenStream) -> TokenStream { + concat(from_tokens(vec![Token::Ident(name), Token::Not]), + build_paren_delimited(args)) + } + + // ____________________________________________________________________________________________ + // Utilities + + /// A wrapper around `TokenStream::from_tokens` to avoid extra namespace specification and + /// provide it as a generic operator. + pub fn from_tokens(tokens: Vec) -> TokenStream { + TokenStream::from_tokens(tokens) + } + + pub fn intersperse(vs: Vec, t: T) -> Vec + where T: Clone + { + if vs.len() < 2 { + return vs; + } + let mut output = vec![vs.get(0).unwrap().to_owned()]; + + for v in vs.into_iter().skip(1) { + output.push(t.clone()); + output.push(v); + } + output + } +} diff --git a/src/librand/reseeding.rs b/src/librand/reseeding.rs index c7d560eb1f..48395c12fa 100644 --- a/src/librand/reseeding.rs +++ b/src/librand/reseeding.rs @@ -113,6 +113,7 @@ impl Reseeder for ReseedWithDefault { } #[stable(feature = "rust1", since = "1.0.0")] impl Default for ReseedWithDefault { + /// Creates an instance of `ReseedWithDefault`. fn default() -> ReseedWithDefault { ReseedWithDefault } @@ -137,6 +138,7 @@ mod tests { } } impl Default for Counter { + /// Constructs a `Counter` with initial value zero. fn default() -> Counter { Counter { i: 0 } } diff --git a/src/librbml/lib.rs b/src/librbml/lib.rs deleted file mode 100644 index 4edbeab5df..0000000000 --- a/src/librbml/lib.rs +++ /dev/null @@ -1,1609 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Really Bad Markup Language (rbml) is an internal serialization format of rustc. -//! This is not intended to be used by users. -//! -//! Originally based on the Extensible Binary Markup Language -//! (ebml; http://www.matroska.org/technical/specs/rfc/index.html), -//! it is now a separate format tuned for the rust object metadata. -//! -//! # Encoding -//! -//! RBML document consists of the tag, length and data. -//! The encoded data can contain multiple RBML documents concatenated. -//! -//! **Tags** are a hint for the following data. -//! Tags are a number from 0x000 to 0xfff, where 0xf0 through 0xff is reserved. -//! Tags less than 0xf0 are encoded in one literal byte. -//! Tags greater than 0xff are encoded in two big-endian bytes, -//! where the tag number is ORed with 0xf000. (E.g. tag 0x123 = `f1 23`) -//! -//! **Lengths** encode the length of the following data. -//! It is a variable-length unsigned isize, and one of the following forms: -//! -//! - `80` through `fe` for lengths up to 0x7e; -//! - `40 ff` through `7f ff` for lengths up to 0x3fff; -//! - `20 40 00` through `3f ff ff` for lengths up to 0x1fffff; -//! - `10 20 00 00` through `1f ff ff ff` for lengths up to 0xfffffff. -//! -//! The "overlong" form is allowed so that the length can be encoded -//! without the prior knowledge of the encoded data. -//! For example, the length 0 can be represented either by `80`, `40 00`, -//! `20 00 00` or `10 00 00 00`. -//! 
The encoder tries to minimize the length if possible. -//! Also, some predefined tags listed below are so commonly used that -//! their lengths are omitted ("implicit length"). -//! -//! **Data** can be either binary bytes or zero or more nested RBML documents. -//! Nested documents cannot overflow, and should be entirely contained -//! within a parent document. -//! -//! # Predefined Tags -//! -//! Most RBML tags are defined by the application. -//! (For the rust object metadata, see also `rustc::metadata::common`.) -//! RBML itself does define a set of predefined tags however, -//! intended for the auto-serialization implementation. -//! -//! Predefined tags with an implicit length: -//! -//! - `U8` (`00`): 1-byte unsigned integer. -//! - `U16` (`01`): 2-byte big endian unsigned integer. -//! - `U32` (`02`): 4-byte big endian unsigned integer. -//! - `U64` (`03`): 8-byte big endian unsigned integer. -//! Any of `U*` tags can be used to encode primitive unsigned integer types, -//! as long as it is no greater than the actual size. -//! For example, `u8` can only be represented via the `U8` tag. -//! -//! - `I8` (`04`): 1-byte signed integer. -//! - `I16` (`05`): 2-byte big endian signed integer. -//! - `I32` (`06`): 4-byte big endian signed integer. -//! - `I64` (`07`): 8-byte big endian signed integer. -//! Similar to `U*` tags. Always uses two's complement encoding. -//! -//! - `Bool` (`08`): 1-byte boolean value, `00` for false and `01` for true. -//! -//! - `Char` (`09`): 4-byte big endian Unicode scalar value. -//! Surrogate pairs or out-of-bound values are invalid. -//! -//! - `F32` (`0a`): 4-byte big endian unsigned integer representing -//! IEEE 754 binary32 floating-point format. -//! - `F64` (`0b`): 8-byte big endian unsigned integer representing -//! IEEE 754 binary64 floating-point format. -//! -//! - `Sub8` (`0c`): 1-byte unsigned integer for supplementary information. -//! - `Sub32` (`0d`): 4-byte unsigned integer for supplementary information. -//! Those two tags normally occur as the first subdocument of certain tags, -//! namely `Enum`, `Vec` and `Map`, to provide a variant or size information. -//! They can be used interchangeably. -//! -//! Predefined tags with an explicit length: -//! -//! - `Str` (`10`): A UTF-8-encoded string. -//! -//! - `Enum` (`11`): An enum. -//! The first subdocument should be `Sub*` tags with a variant ID. -//! Subsequent subdocuments, if any, encode variant arguments. -//! -//! - `Vec` (`12`): A vector (sequence). -//! - `VecElt` (`13`): A vector element. -//! The first subdocument should be `Sub*` tags with the number of elements. -//! Subsequent subdocuments should be `VecElt` tag per each element. -//! -//! - `Map` (`14`): A map (associated array). -//! - `MapKey` (`15`): A key part of the map entry. -//! - `MapVal` (`16`): A value part of the map entry. -//! The first subdocument should be `Sub*` tags with the number of entries. -//! Subsequent subdocuments should be an alternating sequence of -//! `MapKey` and `MapVal` tags per each entry. -//! -//! - `Opaque` (`17`): An opaque, custom-format tag. -//! Used to wrap ordinary custom tags or data in the auto-serialized context. -//! Rustc typically uses this to encode type information. -//! -//! First 0x20 tags are reserved by RBML; custom tags start at 0x20. 
- -#![crate_name = "rbml"] -#![unstable(feature = "rustc_private", issue = "27812")] -#![crate_type = "rlib"] -#![crate_type = "dylib"] -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/", - html_playground_url = "https://play.rust-lang.org/", - test(attr(deny(warnings))))] -#![cfg_attr(not(stage0), deny(warnings))] - -#![feature(rustc_private)] -#![feature(staged_api)] -#![feature(question_mark)] - -#![cfg_attr(test, feature(test))] - -extern crate serialize; - -#[cfg(test)] -extern crate serialize as rustc_serialize; // Used by RustcEncodable - -#[macro_use] -extern crate log; - -#[cfg(test)] -extern crate test; - -pub mod opaque; -pub mod leb128; - -pub use self::EbmlEncoderTag::*; -pub use self::Error::*; - -use std::str; -use std::fmt; - -/// Common data structures -#[derive(Clone, Copy)] -pub struct Doc<'a> { - pub data: &'a [u8], - pub start: usize, - pub end: usize, -} - -impl<'doc> Doc<'doc> { - pub fn new(data: &'doc [u8]) -> Doc<'doc> { - Doc { - data: data, - start: 0, - end: data.len(), - } - } - - pub fn get(&self, tag: usize) -> Doc<'doc> { - reader::get_doc(*self, tag) - } - - pub fn is_empty(&self) -> bool { - self.start == self.end - } - - pub fn as_str_slice(&self) -> &'doc str { - str::from_utf8(&self.data[self.start..self.end]).unwrap() - } - - pub fn as_str(&self) -> String { - self.as_str_slice().to_string() - } -} - -pub struct TaggedDoc<'a> { - tag: usize, - pub doc: Doc<'a>, -} - -#[derive(Copy, Clone, Debug)] -pub enum EbmlEncoderTag { - // tags 00..1f are reserved for auto-serialization. - // first NUM_IMPLICIT_TAGS tags are implicitly sized and lengths are not encoded. - EsU8 = 0x00, // + 1 byte - EsU16 = 0x01, // + 2 bytes - EsU32 = 0x02, // + 4 bytes - EsU64 = 0x03, // + 8 bytes - EsI8 = 0x04, // + 1 byte - EsI16 = 0x05, // + 2 bytes - EsI32 = 0x06, // + 4 bytes - EsI64 = 0x07, // + 8 bytes - EsBool = 0x08, // + 1 byte - EsChar = 0x09, // + 4 bytes - EsF32 = 0x0a, // + 4 bytes - EsF64 = 0x0b, // + 8 bytes - EsSub8 = 0x0c, // + 1 byte - EsSub32 = 0x0d, // + 4 bytes - // 0x0e and 0x0f are reserved - EsStr = 0x10, - EsEnum = 0x11, // encodes the variant id as the first EsSub* - EsVec = 0x12, // encodes the # of elements as the first EsSub* - EsVecElt = 0x13, - EsMap = 0x14, // encodes the # of pairs as the first EsSub* - EsMapKey = 0x15, - EsMapVal = 0x16, - EsOpaque = 0x17, -} - -const NUM_TAGS: usize = 0x1000; -const NUM_IMPLICIT_TAGS: usize = 0x0e; - -#[cfg_attr(rustfmt, rustfmt_skip)] -static TAG_IMPLICIT_LEN: [i8; NUM_IMPLICIT_TAGS] = [ - 1, 2, 4, 8, // EsU* - 1, 2, 4, 8, // ESI* - 1, // EsBool - 4, // EsChar - 4, 8, // EsF* - 1, 4, // EsSub* -]; - -#[derive(Debug)] -pub enum Error { - IntTooBig(usize), - InvalidTag(usize), - Expected(String), - IoError(std::io::Error), - ApplicationError(String), -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // FIXME: this should be a more useful display form - fmt::Debug::fmt(self, f) - } -} -// -------------------------------------- - -pub mod reader { - use std::char; - - use std::isize; - use std::mem::transmute; - - use serialize; - - use super::opaque; - use super::{ApplicationError, EsVec, EsMap, EsEnum, EsSub8, EsSub32, EsVecElt, EsMapKey, - EsU64, EsU32, EsU16, EsU8, EsI64, EsI32, EsI16, EsI8, EsBool, EsF64, EsF32, - EsChar, EsStr, EsMapVal, EsOpaque, EbmlEncoderTag, Doc, TaggedDoc, Error, - IntTooBig, InvalidTag, 
Expected, NUM_IMPLICIT_TAGS, TAG_IMPLICIT_LEN};
-
-    pub type DecodeResult<T> = Result<T, Error>;
-    // rbml reading
-
-    macro_rules! try_or {
-        ($e:expr, $r:expr) => (
-            match $e {
-                Ok(e) => e,
-                Err(e) => {
-                    debug!("ignored error: {:?}", e);
-                    return $r
-                }
-            }
-        )
-    }
-
-    #[derive(Copy, Clone)]
-    pub struct Res {
-        pub val: usize,
-        pub next: usize,
-    }
-
-    pub fn tag_at(data: &[u8], start: usize) -> DecodeResult<Res> {
-        let v = data[start] as usize;
-        if v < 0xf0 {
-            Ok(Res {
-                val: v,
-                next: start + 1,
-            })
-        } else if v > 0xf0 {
-            Ok(Res {
-                val: ((v & 0xf) << 8) | data[start + 1] as usize,
-                next: start + 2,
-            })
-        } else {
-            // every tag starting with byte 0xf0 is an overlong form, which is prohibited.
-            Err(InvalidTag(v))
-        }
-    }
-
-    #[inline(never)]
-    fn vuint_at_slow(data: &[u8], start: usize) -> DecodeResult<Res> {
-        let a = data[start];
-        if a & 0x80 != 0 {
-            return Ok(Res {
-                val: (a & 0x7f) as usize,
-                next: start + 1,
-            });
-        }
-        if a & 0x40 != 0 {
-            return Ok(Res {
-                val: ((a & 0x3f) as usize) << 8 | (data[start + 1] as usize),
-                next: start + 2,
-            });
-        }
-        if a & 0x20 != 0 {
-            return Ok(Res {
-                val: ((a & 0x1f) as usize) << 16 | (data[start + 1] as usize) << 8 |
-                     (data[start + 2] as usize),
-                next: start + 3,
-            });
-        }
-        if a & 0x10 != 0 {
-            return Ok(Res {
-                val: ((a & 0x0f) as usize) << 24 | (data[start + 1] as usize) << 16 |
-                     (data[start + 2] as usize) << 8 |
-                     (data[start + 3] as usize),
-                next: start + 4,
-            });
-        }
-        Err(IntTooBig(a as usize))
-    }
-
-    pub fn vuint_at(data: &[u8], start: usize) -> DecodeResult<Res> {
-        if data.len() - start < 4 {
-            return vuint_at_slow(data, start);
-        }
-
-        // Lookup table for parsing EBML Element IDs as per
-        // http://ebml.sourceforge.net/specs/ The Element IDs are parsed by
-        // reading a big endian u32 positioned at data[start]. Using the four
-        // most significant bits of the u32 we lookup in the table below how
-        // the element ID should be derived from it.
-        //
-        // The table stores tuples (shift, mask) where shift is the number the
-        // u32 should be right shifted with and mask is the value the right
-        // shifted value should be masked with. If for example the most
-        // significant bit is set this means it's a class A ID and the u32
-        // should be right shifted with 24 and masked with 0x7f. Therefore we
-        // store (24, 0x7f) at index 0x8 - 0xF (four bit numbers where the most
-        // significant bit is set).
-        //
-        // By storing the number of shifts and masks in a table instead of
-        // checking in order if the most significant bit is set, the second
-        // most significant bit is set etc. we can replace up to three
-        // "and+branch" with a single table lookup which gives us a measured
-        // speedup of around 2x on x86_64.
- static SHIFT_MASK_TABLE: [(usize, u32); 16] = [(0, 0x0), - (0, 0x0fffffff), - (8, 0x1fffff), - (8, 0x1fffff), - (16, 0x3fff), - (16, 0x3fff), - (16, 0x3fff), - (16, 0x3fff), - (24, 0x7f), - (24, 0x7f), - (24, 0x7f), - (24, 0x7f), - (24, 0x7f), - (24, 0x7f), - (24, 0x7f), - (24, 0x7f)]; - - unsafe { - let ptr = data.as_ptr().offset(start as isize) as *const u32; - let val = u32::from_be(*ptr); - - let i = (val >> 28) as usize; - let (shift, mask) = SHIFT_MASK_TABLE[i]; - Ok(Res { - val: ((val >> shift) & mask) as usize, - next: start + ((32 - shift) >> 3), - }) - } - } - - pub fn tag_len_at(data: &[u8], tag: Res) -> DecodeResult { - if tag.val < NUM_IMPLICIT_TAGS && TAG_IMPLICIT_LEN[tag.val] >= 0 { - Ok(Res { - val: TAG_IMPLICIT_LEN[tag.val] as usize, - next: tag.next, - }) - } else { - vuint_at(data, tag.next) - } - } - - pub fn doc_at<'a>(data: &'a [u8], start: usize) -> DecodeResult> { - let elt_tag = tag_at(data, start)?; - let elt_size = tag_len_at(data, elt_tag)?; - let end = elt_size.next + elt_size.val; - Ok(TaggedDoc { - tag: elt_tag.val, - doc: Doc { - data: data, - start: elt_size.next, - end: end, - }, - }) - } - - pub fn maybe_get_doc<'a>(d: Doc<'a>, tg: usize) -> Option> { - let mut pos = d.start; - while pos < d.end { - let elt_tag = try_or!(tag_at(d.data, pos), None); - let elt_size = try_or!(tag_len_at(d.data, elt_tag), None); - pos = elt_size.next + elt_size.val; - if elt_tag.val == tg { - return Some(Doc { - data: d.data, - start: elt_size.next, - end: pos, - }); - } - } - None - } - - pub fn get_doc<'a>(d: Doc<'a>, tg: usize) -> Doc<'a> { - match maybe_get_doc(d, tg) { - Some(d) => d, - None => { - error!("failed to find block with tag {:?}", tg); - panic!(); - } - } - } - - pub fn docs<'a>(d: Doc<'a>) -> DocsIterator<'a> { - DocsIterator { d: d } - } - - pub struct DocsIterator<'a> { - d: Doc<'a>, - } - - impl<'a> Iterator for DocsIterator<'a> { - type Item = (usize, Doc<'a>); - - fn next(&mut self) -> Option<(usize, Doc<'a>)> { - if self.d.start >= self.d.end { - return None; - } - - let elt_tag = try_or!(tag_at(self.d.data, self.d.start), { - self.d.start = self.d.end; - None - }); - let elt_size = try_or!(tag_len_at(self.d.data, elt_tag), { - self.d.start = self.d.end; - None - }); - - let end = elt_size.next + elt_size.val; - let doc = Doc { - data: self.d.data, - start: elt_size.next, - end: end, - }; - - self.d.start = end; - return Some((elt_tag.val, doc)); - } - } - - pub fn tagged_docs<'a>(d: Doc<'a>, tag: usize) -> TaggedDocsIterator<'a> { - TaggedDocsIterator { - iter: docs(d), - tag: tag, - } - } - - pub struct TaggedDocsIterator<'a> { - iter: DocsIterator<'a>, - tag: usize, - } - - impl<'a> Iterator for TaggedDocsIterator<'a> { - type Item = Doc<'a>; - - fn next(&mut self) -> Option> { - while let Some((tag, doc)) = self.iter.next() { - if tag == self.tag { - return Some(doc); - } - } - None - } - } - - pub fn with_doc_data(d: Doc, f: F) -> T - where F: FnOnce(&[u8]) -> T - { - f(&d.data[d.start..d.end]) - } - - pub fn doc_as_u8(d: Doc) -> u8 { - assert_eq!(d.end, d.start + 1); - d.data[d.start] - } - - pub fn doc_as_u64(d: Doc) -> u64 { - if d.end >= 8 { - // For performance, we read 8 big-endian bytes, - // and mask off the junk if there is any. This - // obviously won't work on the first 8 bytes - // of a file - we will fall of the start - // of the page and segfault. 
- - let mut b = [0; 8]; - b.copy_from_slice(&d.data[d.end - 8..d.end]); - let data = unsafe { (*(b.as_ptr() as *const u64)).to_be() }; - let len = d.end - d.start; - if len < 8 { - data & ((1 << (len * 8)) - 1) - } else { - data - } - } else { - let mut result = 0; - for b in &d.data[d.start..d.end] { - result = (result << 8) + (*b as u64); - } - result - } - } - - #[inline] - pub fn doc_as_u16(d: Doc) -> u16 { - doc_as_u64(d) as u16 - } - #[inline] - pub fn doc_as_u32(d: Doc) -> u32 { - doc_as_u64(d) as u32 - } - - #[inline] - pub fn doc_as_i8(d: Doc) -> i8 { - doc_as_u8(d) as i8 - } - #[inline] - pub fn doc_as_i16(d: Doc) -> i16 { - doc_as_u16(d) as i16 - } - #[inline] - pub fn doc_as_i32(d: Doc) -> i32 { - doc_as_u32(d) as i32 - } - #[inline] - pub fn doc_as_i64(d: Doc) -> i64 { - doc_as_u64(d) as i64 - } - - pub struct Decoder<'a> { - parent: Doc<'a>, - pos: usize, - } - - impl<'doc> Decoder<'doc> { - pub fn new(d: Doc<'doc>) -> Decoder<'doc> { - Decoder { - parent: d, - pos: d.start, - } - } - - fn next_doc(&mut self, exp_tag: EbmlEncoderTag) -> DecodeResult> { - debug!(". next_doc(exp_tag={:?})", exp_tag); - if self.pos >= self.parent.end { - return Err(Expected(format!("no more documents in current node!"))); - } - let TaggedDoc { tag: r_tag, doc: r_doc } = doc_at(self.parent.data, self.pos)?; - debug!("self.parent={:?}-{:?} self.pos={:?} r_tag={:?} r_doc={:?}-{:?}", - self.parent.start, - self.parent.end, - self.pos, - r_tag, - r_doc.start, - r_doc.end); - if r_tag != (exp_tag as usize) { - return Err(Expected(format!("expected EBML doc with tag {:?} but found tag {:?}", - exp_tag, - r_tag))); - } - if r_doc.end > self.parent.end { - return Err(Expected(format!("invalid EBML, child extends to {:#x}, parent to \ - {:#x}", - r_doc.end, - self.parent.end))); - } - self.pos = r_doc.end; - Ok(r_doc) - } - - fn push_doc(&mut self, exp_tag: EbmlEncoderTag, f: F) -> DecodeResult - where F: FnOnce(&mut Decoder<'doc>) -> DecodeResult - { - let d = self.next_doc(exp_tag)?; - let old_parent = self.parent; - let old_pos = self.pos; - self.parent = d; - self.pos = d.start; - let r = f(self)?; - self.parent = old_parent; - self.pos = old_pos; - Ok(r) - } - - fn _next_sub(&mut self) -> DecodeResult { - // empty vector/map optimization - if self.parent.is_empty() { - return Ok(0); - } - - let TaggedDoc { tag: r_tag, doc: r_doc } = doc_at(self.parent.data, self.pos)?; - let r = if r_tag == (EsSub8 as usize) { - doc_as_u8(r_doc) as usize - } else if r_tag == (EsSub32 as usize) { - doc_as_u32(r_doc) as usize - } else { - return Err(Expected(format!("expected EBML doc with tag {:?} or {:?} but found \ - tag {:?}", - EsSub8, - EsSub32, - r_tag))); - }; - if r_doc.end > self.parent.end { - return Err(Expected(format!("invalid EBML, child extends to {:#x}, parent to \ - {:#x}", - r_doc.end, - self.parent.end))); - } - self.pos = r_doc.end; - debug!("_next_sub result={:?}", r); - Ok(r) - } - - // variable-length unsigned integer with different tags. - // `first_tag` should be a tag for u8 or i8. - // `last_tag` should be the largest allowed integer tag with the matching signedness. - // all tags between them should be valid, in the order of u8, u16, u32 and u64. 
- fn _next_int(&mut self, - first_tag: EbmlEncoderTag, - last_tag: EbmlEncoderTag) - -> DecodeResult { - if self.pos >= self.parent.end { - return Err(Expected(format!("no more documents in current node!"))); - } - - let TaggedDoc { tag: r_tag, doc: r_doc } = doc_at(self.parent.data, self.pos)?; - let r = if first_tag as usize <= r_tag && r_tag <= last_tag as usize { - match r_tag - first_tag as usize { - 0 => doc_as_u8(r_doc) as u64, - 1 => doc_as_u16(r_doc) as u64, - 2 => doc_as_u32(r_doc) as u64, - 3 => doc_as_u64(r_doc), - _ => unreachable!(), - } - } else { - return Err(Expected(format!("expected EBML doc with tag {:?} through {:?} but \ - found tag {:?}", - first_tag, - last_tag, - r_tag))); - }; - if r_doc.end > self.parent.end { - return Err(Expected(format!("invalid EBML, child extends to {:#x}, parent to \ - {:#x}", - r_doc.end, - self.parent.end))); - } - self.pos = r_doc.end; - debug!("_next_int({:?}, {:?}) result={:?}", first_tag, last_tag, r); - Ok(r) - } - - pub fn read_opaque(&mut self, op: F) -> DecodeResult - where F: FnOnce(&mut opaque::Decoder, Doc) -> DecodeResult - { - let doc = self.next_doc(EsOpaque)?; - - let result = { - let mut opaque_decoder = opaque::Decoder::new(doc.data, doc.start); - op(&mut opaque_decoder, doc)? - }; - - Ok(result) - } - - pub fn position(&self) -> usize { - self.pos - } - - pub fn advance(&mut self, bytes: usize) { - self.pos += bytes; - } - } - - impl<'doc> serialize::Decoder for Decoder<'doc> { - type Error = Error; - fn read_nil(&mut self) -> DecodeResult<()> { - Ok(()) - } - - fn read_u64(&mut self) -> DecodeResult { - self._next_int(EsU8, EsU64) - } - fn read_u32(&mut self) -> DecodeResult { - Ok(self._next_int(EsU8, EsU32)? as u32) - } - fn read_u16(&mut self) -> DecodeResult { - Ok(self._next_int(EsU8, EsU16)? as u16) - } - fn read_u8(&mut self) -> DecodeResult { - Ok(doc_as_u8(self.next_doc(EsU8)?)) - } - fn read_uint(&mut self) -> DecodeResult { - let v = self._next_int(EsU8, EsU64)?; - if v > (::std::usize::MAX as u64) { - Err(IntTooBig(v as usize)) - } else { - Ok(v as usize) - } - } - - fn read_i64(&mut self) -> DecodeResult { - Ok(self._next_int(EsI8, EsI64)? as i64) - } - fn read_i32(&mut self) -> DecodeResult { - Ok(self._next_int(EsI8, EsI32)? as i32) - } - fn read_i16(&mut self) -> DecodeResult { - Ok(self._next_int(EsI8, EsI16)? as i16) - } - fn read_i8(&mut self) -> DecodeResult { - Ok(doc_as_u8(self.next_doc(EsI8)?) as i8) - } - fn read_int(&mut self) -> DecodeResult { - let v = self._next_int(EsI8, EsI64)? as i64; - if v > (isize::MAX as i64) || v < (isize::MIN as i64) { - debug!("FIXME \\#6122: Removing this makes this function miscompile"); - Err(IntTooBig(v as usize)) - } else { - Ok(v as isize) - } - } - - fn read_bool(&mut self) -> DecodeResult { - Ok(doc_as_u8(self.next_doc(EsBool)?) 
!= 0) - } - - fn read_f64(&mut self) -> DecodeResult { - let bits = doc_as_u64(self.next_doc(EsF64)?); - Ok(unsafe { transmute(bits) }) - } - fn read_f32(&mut self) -> DecodeResult { - let bits = doc_as_u32(self.next_doc(EsF32)?); - Ok(unsafe { transmute(bits) }) - } - fn read_char(&mut self) -> DecodeResult { - Ok(char::from_u32(doc_as_u32(self.next_doc(EsChar)?)).unwrap()) - } - fn read_str(&mut self) -> DecodeResult { - Ok(self.next_doc(EsStr)?.as_str()) - } - - // Compound types: - fn read_enum(&mut self, name: &str, f: F) -> DecodeResult - where F: FnOnce(&mut Decoder<'doc>) -> DecodeResult - { - debug!("read_enum({})", name); - - let doc = self.next_doc(EsEnum)?; - - let (old_parent, old_pos) = (self.parent, self.pos); - self.parent = doc; - self.pos = self.parent.start; - - let result = f(self)?; - - self.parent = old_parent; - self.pos = old_pos; - Ok(result) - } - - fn read_enum_variant(&mut self, _: &[&str], mut f: F) -> DecodeResult - where F: FnMut(&mut Decoder<'doc>, usize) -> DecodeResult - { - debug!("read_enum_variant()"); - let idx = self._next_sub()?; - debug!(" idx={}", idx); - - f(self, idx) - } - - fn read_enum_variant_arg(&mut self, idx: usize, f: F) -> DecodeResult - where F: FnOnce(&mut Decoder<'doc>) -> DecodeResult - { - debug!("read_enum_variant_arg(idx={})", idx); - f(self) - } - - fn read_enum_struct_variant(&mut self, _: &[&str], mut f: F) -> DecodeResult - where F: FnMut(&mut Decoder<'doc>, usize) -> DecodeResult - { - debug!("read_enum_struct_variant()"); - let idx = self._next_sub()?; - debug!(" idx={}", idx); - - f(self, idx) - } - - fn read_enum_struct_variant_field(&mut self, - name: &str, - idx: usize, - f: F) - -> DecodeResult - where F: FnOnce(&mut Decoder<'doc>) -> DecodeResult - { - debug!("read_enum_struct_variant_arg(name={}, idx={})", name, idx); - f(self) - } - - fn read_struct(&mut self, name: &str, _: usize, f: F) -> DecodeResult - where F: FnOnce(&mut Decoder<'doc>) -> DecodeResult - { - debug!("read_struct(name={})", name); - f(self) - } - - fn read_struct_field(&mut self, name: &str, idx: usize, f: F) -> DecodeResult - where F: FnOnce(&mut Decoder<'doc>) -> DecodeResult - { - debug!("read_struct_field(name={}, idx={})", name, idx); - f(self) - } - - fn read_tuple(&mut self, tuple_len: usize, f: F) -> DecodeResult - where F: FnOnce(&mut Decoder<'doc>) -> DecodeResult - { - debug!("read_tuple()"); - self.read_seq(move |d, len| { - if len == tuple_len { - f(d) - } else { - Err(Expected(format!("Expected tuple of length `{}`, found tuple of length \ - `{}`", - tuple_len, - len))) - } - }) - } - - fn read_tuple_arg(&mut self, idx: usize, f: F) -> DecodeResult - where F: FnOnce(&mut Decoder<'doc>) -> DecodeResult - { - debug!("read_tuple_arg(idx={})", idx); - self.read_seq_elt(idx, f) - } - - fn read_tuple_struct(&mut self, name: &str, len: usize, f: F) -> DecodeResult - where F: FnOnce(&mut Decoder<'doc>) -> DecodeResult - { - debug!("read_tuple_struct(name={})", name); - self.read_tuple(len, f) - } - - fn read_tuple_struct_arg(&mut self, idx: usize, f: F) -> DecodeResult - where F: FnOnce(&mut Decoder<'doc>) -> DecodeResult - { - debug!("read_tuple_struct_arg(idx={})", idx); - self.read_tuple_arg(idx, f) - } - - fn read_option(&mut self, mut f: F) -> DecodeResult - where F: FnMut(&mut Decoder<'doc>, bool) -> DecodeResult - { - debug!("read_option()"); - self.read_enum("Option", move |this| { - this.read_enum_variant(&["None", "Some"], move |this, idx| { - match idx { - 0 => f(this, false), - 1 => f(this, true), - _ => 
Err(Expected(format!("Expected None or Some"))), - } - }) - }) - } - - fn read_seq(&mut self, f: F) -> DecodeResult - where F: FnOnce(&mut Decoder<'doc>, usize) -> DecodeResult - { - debug!("read_seq()"); - self.push_doc(EsVec, move |d| { - let len = d._next_sub()?; - debug!(" len={}", len); - f(d, len) - }) - } - - fn read_seq_elt(&mut self, idx: usize, f: F) -> DecodeResult - where F: FnOnce(&mut Decoder<'doc>) -> DecodeResult - { - debug!("read_seq_elt(idx={})", idx); - self.push_doc(EsVecElt, f) - } - - fn read_map(&mut self, f: F) -> DecodeResult - where F: FnOnce(&mut Decoder<'doc>, usize) -> DecodeResult - { - debug!("read_map()"); - self.push_doc(EsMap, move |d| { - let len = d._next_sub()?; - debug!(" len={}", len); - f(d, len) - }) - } - - fn read_map_elt_key(&mut self, idx: usize, f: F) -> DecodeResult - where F: FnOnce(&mut Decoder<'doc>) -> DecodeResult - { - debug!("read_map_elt_key(idx={})", idx); - self.push_doc(EsMapKey, f) - } - - fn read_map_elt_val(&mut self, idx: usize, f: F) -> DecodeResult - where F: FnOnce(&mut Decoder<'doc>) -> DecodeResult - { - debug!("read_map_elt_val(idx={})", idx); - self.push_doc(EsMapVal, f) - } - - fn error(&mut self, err: &str) -> Error { - ApplicationError(err.to_string()) - } - } -} - -pub mod writer { - use std::mem; - use std::io::prelude::*; - use std::io::{self, SeekFrom, Cursor}; - - use super::opaque; - use super::{EsVec, EsMap, EsEnum, EsSub8, EsSub32, EsVecElt, EsMapKey, EsU64, EsU32, EsU16, - EsU8, EsI64, EsI32, EsI16, EsI8, EsBool, EsF64, EsF32, EsChar, EsStr, EsMapVal, - EsOpaque, NUM_IMPLICIT_TAGS, NUM_TAGS}; - - use serialize; - - - pub type EncodeResult = io::Result<()>; - - // rbml writing - pub struct Encoder<'a> { - pub writer: &'a mut Cursor>, - size_positions: Vec, - relax_limit: u64, // do not move encoded bytes before this position - } - - fn write_tag(w: &mut W, n: usize) -> EncodeResult { - if n < 0xf0 { - w.write_all(&[n as u8]) - } else if 0x100 <= n && n < NUM_TAGS { - w.write_all(&[0xf0 | (n >> 8) as u8, n as u8]) - } else { - Err(io::Error::new(io::ErrorKind::Other, &format!("invalid tag: {}", n)[..])) - } - } - - fn write_sized_vuint(w: &mut W, n: usize, size: usize) -> EncodeResult { - match size { - 1 => w.write_all(&[0x80 | (n as u8)]), - 2 => w.write_all(&[0x40 | ((n >> 8) as u8), n as u8]), - 3 => w.write_all(&[0x20 | ((n >> 16) as u8), (n >> 8) as u8, n as u8]), - 4 => w.write_all(&[0x10 | ((n >> 24) as u8), (n >> 16) as u8, (n >> 8) as u8, n as u8]), - _ => Err(io::Error::new(io::ErrorKind::Other, &format!("isize too big: {}", n)[..])), - } - } - - pub fn write_vuint(w: &mut W, n: usize) -> EncodeResult { - if n < 0x7f { - return write_sized_vuint(w, n, 1); - } - if n < 0x4000 { - return write_sized_vuint(w, n, 2); - } - if n < 0x200000 { - return write_sized_vuint(w, n, 3); - } - if n < 0x10000000 { - return write_sized_vuint(w, n, 4); - } - Err(io::Error::new(io::ErrorKind::Other, &format!("isize too big: {}", n)[..])) - } - - impl<'a> Encoder<'a> { - pub fn new(w: &'a mut Cursor>) -> Encoder<'a> { - Encoder { - writer: w, - size_positions: vec![], - relax_limit: 0, - } - } - - pub fn start_tag(&mut self, tag_id: usize) -> EncodeResult { - debug!("Start tag {:?}", tag_id); - assert!(tag_id >= NUM_IMPLICIT_TAGS); - - // Write the enum ID: - write_tag(self.writer, tag_id)?; - - // Write a placeholder four-byte size. 
- let cur_pos = self.writer.seek(SeekFrom::Current(0))?; - self.size_positions.push(cur_pos); - let zeroes: &[u8] = &[0, 0, 0, 0]; - self.writer.write_all(zeroes) - } - - pub fn end_tag(&mut self) -> EncodeResult { - let last_size_pos = self.size_positions.pop().unwrap(); - let cur_pos = self.writer.seek(SeekFrom::Current(0))?; - self.writer.seek(SeekFrom::Start(last_size_pos))?; - let size = (cur_pos - last_size_pos - 4) as usize; - - // relax the size encoding for small tags (bigger tags are costly to move). - // we should never try to move the stable positions, however. - const RELAX_MAX_SIZE: usize = 0x100; - if size <= RELAX_MAX_SIZE && last_size_pos >= self.relax_limit { - // we can't alter the buffer in place, so have a temporary buffer - let mut buf = [0u8; RELAX_MAX_SIZE]; - { - let last_size_pos = last_size_pos as usize; - let data = &self.writer.get_ref()[last_size_pos + 4..cur_pos as usize]; - buf[..size].copy_from_slice(data); - } - - // overwrite the size and data and continue - write_vuint(self.writer, size)?; - self.writer.write_all(&buf[..size])?; - } else { - // overwrite the size with an overlong encoding and skip past the data - write_sized_vuint(self.writer, size, 4)?; - self.writer.seek(SeekFrom::Start(cur_pos))?; - } - - debug!("End tag (size = {:?})", size); - Ok(()) - } - - pub fn wr_tag(&mut self, tag_id: usize, blk: F) -> EncodeResult - where F: FnOnce() -> EncodeResult - { - self.start_tag(tag_id)?; - blk()?; - self.end_tag() - } - - pub fn wr_tagged_bytes(&mut self, tag_id: usize, b: &[u8]) -> EncodeResult { - assert!(tag_id >= NUM_IMPLICIT_TAGS); - write_tag(self.writer, tag_id)?; - write_vuint(self.writer, b.len())?; - self.writer.write_all(b) - } - - pub fn wr_tagged_u64(&mut self, tag_id: usize, v: u64) -> EncodeResult { - let bytes: [u8; 8] = unsafe { mem::transmute(v.to_be()) }; - // tagged integers are emitted in big-endian, with no - // leading zeros. 
- let leading_zero_bytes = v.leading_zeros() / 8; - self.wr_tagged_bytes(tag_id, &bytes[leading_zero_bytes as usize..]) - } - - #[inline] - pub fn wr_tagged_u32(&mut self, tag_id: usize, v: u32) -> EncodeResult { - self.wr_tagged_u64(tag_id, v as u64) - } - - #[inline] - pub fn wr_tagged_u16(&mut self, tag_id: usize, v: u16) -> EncodeResult { - self.wr_tagged_u64(tag_id, v as u64) - } - - #[inline] - pub fn wr_tagged_u8(&mut self, tag_id: usize, v: u8) -> EncodeResult { - self.wr_tagged_bytes(tag_id, &[v]) - } - - #[inline] - pub fn wr_tagged_i64(&mut self, tag_id: usize, v: i64) -> EncodeResult { - self.wr_tagged_u64(tag_id, v as u64) - } - - #[inline] - pub fn wr_tagged_i32(&mut self, tag_id: usize, v: i32) -> EncodeResult { - self.wr_tagged_u32(tag_id, v as u32) - } - - #[inline] - pub fn wr_tagged_i16(&mut self, tag_id: usize, v: i16) -> EncodeResult { - self.wr_tagged_u16(tag_id, v as u16) - } - - #[inline] - pub fn wr_tagged_i8(&mut self, tag_id: usize, v: i8) -> EncodeResult { - self.wr_tagged_bytes(tag_id, &[v as u8]) - } - - pub fn wr_tagged_str(&mut self, tag_id: usize, v: &str) -> EncodeResult { - self.wr_tagged_bytes(tag_id, v.as_bytes()) - } - - // for auto-serialization - fn wr_tagged_raw_bytes(&mut self, tag_id: usize, b: &[u8]) -> EncodeResult { - write_tag(self.writer, tag_id)?; - self.writer.write_all(b) - } - - fn wr_tagged_raw_u64(&mut self, tag_id: usize, v: u64) -> EncodeResult { - let bytes: [u8; 8] = unsafe { mem::transmute(v.to_be()) }; - self.wr_tagged_raw_bytes(tag_id, &bytes) - } - - fn wr_tagged_raw_u32(&mut self, tag_id: usize, v: u32) -> EncodeResult { - let bytes: [u8; 4] = unsafe { mem::transmute(v.to_be()) }; - self.wr_tagged_raw_bytes(tag_id, &bytes) - } - - fn wr_tagged_raw_u16(&mut self, tag_id: usize, v: u16) -> EncodeResult { - let bytes: [u8; 2] = unsafe { mem::transmute(v.to_be()) }; - self.wr_tagged_raw_bytes(tag_id, &bytes) - } - - fn wr_tagged_raw_u8(&mut self, tag_id: usize, v: u8) -> EncodeResult { - self.wr_tagged_raw_bytes(tag_id, &[v]) - } - - fn wr_tagged_raw_i64(&mut self, tag_id: usize, v: i64) -> EncodeResult { - self.wr_tagged_raw_u64(tag_id, v as u64) - } - - fn wr_tagged_raw_i32(&mut self, tag_id: usize, v: i32) -> EncodeResult { - self.wr_tagged_raw_u32(tag_id, v as u32) - } - - fn wr_tagged_raw_i16(&mut self, tag_id: usize, v: i16) -> EncodeResult { - self.wr_tagged_raw_u16(tag_id, v as u16) - } - - fn wr_tagged_raw_i8(&mut self, tag_id: usize, v: i8) -> EncodeResult { - self.wr_tagged_raw_bytes(tag_id, &[v as u8]) - } - - pub fn wr_bytes(&mut self, b: &[u8]) -> EncodeResult { - debug!("Write {:?} bytes", b.len()); - self.writer.write_all(b) - } - - pub fn wr_str(&mut self, s: &str) -> EncodeResult { - debug!("Write str: {:?}", s); - self.writer.write_all(s.as_bytes()) - } - - /// Returns the current position while marking it stable, i.e. - /// generated bytes so far wouldn't be affected by relaxation. 
- pub fn mark_stable_position(&mut self) -> u64 { - let pos = self.writer.seek(SeekFrom::Current(0)).unwrap(); - if self.relax_limit < pos { - self.relax_limit = pos; - } - pos - } - } - - impl<'a> Encoder<'a> { - // used internally to emit things like the vector length and so on - fn _emit_tagged_sub(&mut self, v: usize) -> EncodeResult { - if v as u8 as usize == v { - self.wr_tagged_raw_u8(EsSub8 as usize, v as u8) - } else if v as u32 as usize == v { - self.wr_tagged_raw_u32(EsSub32 as usize, v as u32) - } else { - Err(io::Error::new(io::ErrorKind::Other, - &format!("length or variant id too big: {}", v)[..])) - } - } - - pub fn emit_opaque(&mut self, f: F) -> EncodeResult - where F: FnOnce(&mut opaque::Encoder) -> EncodeResult - { - self.start_tag(EsOpaque as usize)?; - - { - let mut opaque_encoder = opaque::Encoder::new(self.writer); - f(&mut opaque_encoder)?; - } - - self.mark_stable_position(); - self.end_tag() - } - } - - impl<'a> serialize::Encoder for Encoder<'a> { - type Error = io::Error; - - fn emit_nil(&mut self) -> EncodeResult { - Ok(()) - } - - fn emit_uint(&mut self, v: usize) -> EncodeResult { - self.emit_u64(v as u64) - } - fn emit_u64(&mut self, v: u64) -> EncodeResult { - if v as u32 as u64 == v { - self.emit_u32(v as u32) - } else { - self.wr_tagged_raw_u64(EsU64 as usize, v) - } - } - fn emit_u32(&mut self, v: u32) -> EncodeResult { - if v as u16 as u32 == v { - self.emit_u16(v as u16) - } else { - self.wr_tagged_raw_u32(EsU32 as usize, v) - } - } - fn emit_u16(&mut self, v: u16) -> EncodeResult { - if v as u8 as u16 == v { - self.emit_u8(v as u8) - } else { - self.wr_tagged_raw_u16(EsU16 as usize, v) - } - } - fn emit_u8(&mut self, v: u8) -> EncodeResult { - self.wr_tagged_raw_u8(EsU8 as usize, v) - } - - fn emit_int(&mut self, v: isize) -> EncodeResult { - self.emit_i64(v as i64) - } - fn emit_i64(&mut self, v: i64) -> EncodeResult { - if v as i32 as i64 == v { - self.emit_i32(v as i32) - } else { - self.wr_tagged_raw_i64(EsI64 as usize, v) - } - } - fn emit_i32(&mut self, v: i32) -> EncodeResult { - if v as i16 as i32 == v { - self.emit_i16(v as i16) - } else { - self.wr_tagged_raw_i32(EsI32 as usize, v) - } - } - fn emit_i16(&mut self, v: i16) -> EncodeResult { - if v as i8 as i16 == v { - self.emit_i8(v as i8) - } else { - self.wr_tagged_raw_i16(EsI16 as usize, v) - } - } - fn emit_i8(&mut self, v: i8) -> EncodeResult { - self.wr_tagged_raw_i8(EsI8 as usize, v) - } - - fn emit_bool(&mut self, v: bool) -> EncodeResult { - self.wr_tagged_raw_u8(EsBool as usize, v as u8) - } - - fn emit_f64(&mut self, v: f64) -> EncodeResult { - let bits = unsafe { mem::transmute(v) }; - self.wr_tagged_raw_u64(EsF64 as usize, bits) - } - fn emit_f32(&mut self, v: f32) -> EncodeResult { - let bits = unsafe { mem::transmute(v) }; - self.wr_tagged_raw_u32(EsF32 as usize, bits) - } - fn emit_char(&mut self, v: char) -> EncodeResult { - self.wr_tagged_raw_u32(EsChar as usize, v as u32) - } - - fn emit_str(&mut self, v: &str) -> EncodeResult { - self.wr_tagged_str(EsStr as usize, v) - } - - fn emit_enum(&mut self, _name: &str, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - self.start_tag(EsEnum as usize)?; - f(self)?; - self.end_tag() - } - - fn emit_enum_variant(&mut self, _: &str, v_id: usize, _: usize, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - self._emit_tagged_sub(v_id)?; - f(self) - } - - fn emit_enum_variant_arg(&mut self, _: usize, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - 
f(self) - } - - fn emit_enum_struct_variant(&mut self, - v_name: &str, - v_id: usize, - cnt: usize, - f: F) - -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - self.emit_enum_variant(v_name, v_id, cnt, f) - } - - fn emit_enum_struct_variant_field(&mut self, _: &str, idx: usize, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - self.emit_enum_variant_arg(idx, f) - } - - fn emit_struct(&mut self, _: &str, _len: usize, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - f(self) - } - - fn emit_struct_field(&mut self, _name: &str, _: usize, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - f(self) - } - - fn emit_tuple(&mut self, len: usize, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - self.emit_seq(len, f) - } - fn emit_tuple_arg(&mut self, idx: usize, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - self.emit_seq_elt(idx, f) - } - - fn emit_tuple_struct(&mut self, _: &str, len: usize, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - self.emit_seq(len, f) - } - fn emit_tuple_struct_arg(&mut self, idx: usize, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - self.emit_seq_elt(idx, f) - } - - fn emit_option(&mut self, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - self.emit_enum("Option", f) - } - fn emit_option_none(&mut self) -> EncodeResult { - self.emit_enum_variant("None", 0, 0, |_| Ok(())) - } - fn emit_option_some(&mut self, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - - self.emit_enum_variant("Some", 1, 1, f) - } - - fn emit_seq(&mut self, len: usize, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - if len == 0 { - // empty vector optimization - return self.wr_tagged_bytes(EsVec as usize, &[]); - } - - self.start_tag(EsVec as usize)?; - self._emit_tagged_sub(len)?; - f(self)?; - self.end_tag() - } - - fn emit_seq_elt(&mut self, _idx: usize, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - - self.start_tag(EsVecElt as usize)?; - f(self)?; - self.end_tag() - } - - fn emit_map(&mut self, len: usize, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - if len == 0 { - // empty map optimization - return self.wr_tagged_bytes(EsMap as usize, &[]); - } - - self.start_tag(EsMap as usize)?; - self._emit_tagged_sub(len)?; - f(self)?; - self.end_tag() - } - - fn emit_map_elt_key(&mut self, _idx: usize, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - - self.start_tag(EsMapKey as usize)?; - f(self)?; - self.end_tag() - } - - fn emit_map_elt_val(&mut self, _idx: usize, f: F) -> EncodeResult - where F: FnOnce(&mut Encoder<'a>) -> EncodeResult - { - self.start_tag(EsMapVal as usize)?; - f(self)?; - self.end_tag() - } - } -} - -// ___________________________________________________________________________ -// Testing - -#[cfg(test)] -mod tests { - use super::{Doc, reader, writer}; - - use serialize::{Encodable, Decodable}; - - use std::io::Cursor; - - #[test] - fn test_vuint_at() { - let data = &[ - 0x80, - 0xff, - 0x40, 0x00, - 0x7f, 0xff, - 0x20, 0x00, 0x00, - 0x3f, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, - 0x1f, 0xff, 0xff, 0xff - ]; - - let mut res: reader::Res; - - // Class A - res = reader::vuint_at(data, 0).unwrap(); - assert_eq!(res.val, 0); - assert_eq!(res.next, 1); - res = 
reader::vuint_at(data, res.next).unwrap(); - assert_eq!(res.val, (1 << 7) - 1); - assert_eq!(res.next, 2); - - // Class B - res = reader::vuint_at(data, res.next).unwrap(); - assert_eq!(res.val, 0); - assert_eq!(res.next, 4); - res = reader::vuint_at(data, res.next).unwrap(); - assert_eq!(res.val, (1 << 14) - 1); - assert_eq!(res.next, 6); - - // Class C - res = reader::vuint_at(data, res.next).unwrap(); - assert_eq!(res.val, 0); - assert_eq!(res.next, 9); - res = reader::vuint_at(data, res.next).unwrap(); - assert_eq!(res.val, (1 << 21) - 1); - assert_eq!(res.next, 12); - - // Class D - res = reader::vuint_at(data, res.next).unwrap(); - assert_eq!(res.val, 0); - assert_eq!(res.next, 16); - res = reader::vuint_at(data, res.next).unwrap(); - assert_eq!(res.val, (1 << 28) - 1); - assert_eq!(res.next, 20); - } - - #[test] - fn test_option_int() { - fn test_v(v: Option) { - debug!("v == {:?}", v); - let mut wr = Cursor::new(Vec::new()); - { - let mut rbml_w = writer::Encoder::new(&mut wr); - let _ = v.encode(&mut rbml_w); - } - let rbml_doc = Doc::new(wr.get_ref()); - let mut deser = reader::Decoder::new(rbml_doc); - let v1 = Decodable::decode(&mut deser).unwrap(); - debug!("v1 == {:?}", v1); - assert_eq!(v, v1); - } - - test_v(Some(22)); - test_v(None); - test_v(Some(3)); - } -} - -#[cfg(test)] -mod bench { - #![allow(non_snake_case)] - use test::Bencher; - use super::reader; - - #[bench] - pub fn vuint_at_A_aligned(b: &mut Bencher) { - let data = (0..4 * 100) - .map(|i| { - match i % 2 { - 0 => 0x80, - _ => i as u8, - } - }) - .collect::>(); - let mut sum = 0; - b.iter(|| { - let mut i = 0; - while i < data.len() { - sum += reader::vuint_at(&data, i).unwrap().val; - i += 4; - } - }); - } - - #[bench] - pub fn vuint_at_A_unaligned(b: &mut Bencher) { - let data = (0..4 * 100 + 1) - .map(|i| { - match i % 2 { - 1 => 0x80, - _ => i as u8, - } - }) - .collect::>(); - let mut sum = 0; - b.iter(|| { - let mut i = 1; - while i < data.len() { - sum += reader::vuint_at(&data, i).unwrap().val; - i += 4; - } - }); - } - - #[bench] - pub fn vuint_at_D_aligned(b: &mut Bencher) { - let data = (0..4 * 100) - .map(|i| { - match i % 4 { - 0 => 0x10, - 3 => i as u8, - _ => 0, - } - }) - .collect::>(); - let mut sum = 0; - b.iter(|| { - let mut i = 0; - while i < data.len() { - sum += reader::vuint_at(&data, i).unwrap().val; - i += 4; - } - }); - } - - #[bench] - pub fn vuint_at_D_unaligned(b: &mut Bencher) { - let data = (0..4 * 100 + 1) - .map(|i| { - match i % 4 { - 1 => 0x10, - 0 => i as u8, - _ => 0, - } - }) - .collect::>(); - let mut sum = 0; - b.iter(|| { - let mut i = 1; - while i < data.len() { - sum += reader::vuint_at(&data, i).unwrap().val; - i += 4; - } - }); - } -} diff --git a/src/librustc/Cargo.toml b/src/librustc/Cargo.toml index aaef8e8423..578ef68b00 100644 --- a/src/librustc/Cargo.toml +++ b/src/librustc/Cargo.toml @@ -14,7 +14,6 @@ flate = { path = "../libflate" } fmt_macros = { path = "../libfmt_macros" } graphviz = { path = "../libgraphviz" } log = { path = "../liblog" } -rbml = { path = "../librbml" } rustc_back = { path = "../librustc_back" } rustc_bitflags = { path = "../librustc_bitflags" } rustc_const_math = { path = "../librustc_const_math" } diff --git a/src/librustc/cfg/construct.rs b/src/librustc/cfg/construct.rs index 232db76a6d..25a7322647 100644 --- a/src/librustc/cfg/construct.rs +++ b/src/librustc/cfg/construct.rs @@ -99,7 +99,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { fn pat(&mut self, pat: &hir::Pat, pred: CFGIndex) -> CFGIndex { match pat.node { - PatKind::Binding(_, _, 
None) | + PatKind::Binding(.., None) | PatKind::Path(..) | PatKind::Lit(..) | PatKind::Range(..) | @@ -109,7 +109,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { PatKind::Box(ref subpat) | PatKind::Ref(ref subpat, _) | - PatKind::Binding(_, _, Some(ref subpat)) => { + PatKind::Binding(.., Some(ref subpat)) => { let subpat_exit = self.pat(&subpat, pred); self.add_ast_node(pat.id, &[subpat_exit]) } @@ -306,7 +306,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { self.call(expr, pred, &func, args.iter().map(|e| &**e)) } - hir::ExprMethodCall(_, _, ref args) => { + hir::ExprMethodCall(.., ref args) => { self.call(expr, pred, &args[0], args[1..].iter().map(|e| &**e)) } diff --git a/src/librustc/dep_graph/README.md b/src/librustc/dep_graph/README.md index f16a9b386b..48f5b7ea25 100644 --- a/src/librustc/dep_graph/README.md +++ b/src/librustc/dep_graph/README.md @@ -341,6 +341,8 @@ path is found (as demonstrated above). ### Debugging the dependency graph +#### Dumping the graph + The compiler is also capable of dumping the dependency graph for your debugging pleasure. To do so, pass the `-Z dump-dep-graph` flag. The graph will be dumped to `dep_graph.{txt,dot}` in the current @@ -392,6 +394,35 @@ This will dump out all the nodes that lead from `Hir(foo)` to `TypeckItemBody(bar)`, from which you can (hopefully) see the source of the erroneous edge. +#### Tracking down incorrect edges + +Sometimes, after you dump the dependency graph, you will find some +path that should not exist, but you will not be quite sure how it came +to be. **When the compiler is built with debug assertions,** it can +help you track that down. Simply set the `RUST_FORBID_DEP_GRAPH_EDGE` +environment variable to a filter. Every edge created in the dep-graph +will be tested against that filter -- if it matches, a `bug!` is +reported, so you can easily see the backtrace (`RUST_BACKTRACE=1`). + +The syntax for these filters is the same as described in the previous +section. However, note that this filter is applied to every **edge** +and doesn't handle longer paths in the graph, unlike the previous +section. + +Example: + +You find that there is a path from the `Hir` of `foo` to the type +check of `bar` and you don't think there should be. You dump the +dep-graph as described in the previous section and open `dep-graph.txt` +to see something like: + + Hir(foo) -> Collect(bar) + Collect(bar) -> TypeckItemBody(bar) + +That first edge looks suspicious to you. So you set +`RUST_FORBID_DEP_GRAPH_EDGE` to `Hir&foo -> Collect&bar`, re-run, and +then observe the backtrace. Voila, bug fixed! + ### Inlining of HIR nodes For the time being, at least, we still sometimes "inline" HIR nodes diff --git a/src/librustc/dep_graph/debug.rs b/src/librustc/dep_graph/debug.rs index 15b0380374..5b15c5e671 100644 --- a/src/librustc/dep_graph/debug.rs +++ b/src/librustc/dep_graph/debug.rs @@ -66,4 +66,11 @@ impl EdgeFilter { }) } } + + pub fn test(&self, + source: &DepNode, + target: &DepNode) + -> bool { + self.source.test(source) && self.target.test(target) + } } diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs index 40fd3dede3..e99ffa95ed 100644 --- a/src/librustc/dep_graph/dep_node.rs +++ b/src/librustc/dep_graph/dep_node.rs @@ -51,7 +51,6 @@ pub enum DepNode { WorkProduct(Arc), // Represents different phases in the compiler. - CrateReader, CollectLanguageItems, CheckStaticRecursion, ResolveLifetimes, @@ -103,18 +102,13 @@ pub enum DepNode { // table in the tcx (or elsewhere) maps to one of these // nodes. 
Often we map multiple tables to the same node if there // is no point in distinguishing them (e.g., both the type and - // predicates for an item wind up in `ItemSignature`). Other - // times, such as `ImplItems` vs `TraitItemDefIds`, tables which - // might be mergable are kept distinct because the sets of def-ids - // to which they apply are disjoint, and hence we might as well - // have distinct labels for easier debugging. + // predicates for an item wind up in `ItemSignature`). ImplOrTraitItems(D), ItemSignature(D), FieldTy(D), SizedConstraint(D), - TraitItemDefIds(D), + ImplOrTraitItemDefIds(D), InherentImpls(D), - ImplItems(D), // The set of impls for a given trait. Ultimately, it would be // nice to get more fine-grained here (e.g., to include a @@ -132,7 +126,7 @@ pub enum DepNode { // which would yield an overly conservative dep-graph. TraitItems(D), ReprHints(D), - TraitSelect(D, Vec), + TraitSelect(Vec), } impl DepNode { @@ -147,6 +141,11 @@ impl DepNode { } } + if label == "Krate" { + // special case + return Ok(DepNode::Krate); + } + check! { CollectItem, BorrowCheck, @@ -157,9 +156,8 @@ impl DepNode { ImplOrTraitItems, ItemSignature, FieldTy, - TraitItemDefIds, + ImplOrTraitItemDefIds, InherentImpls, - ImplItems, TraitImpls, ReprHints, } @@ -172,7 +170,6 @@ impl DepNode { match *self { Krate => Some(Krate), - CrateReader => Some(CrateReader), CollectLanguageItems => Some(CollectLanguageItems), CheckStaticRecursion => Some(CheckStaticRecursion), ResolveLifetimes => Some(ResolveLifetimes), @@ -226,16 +223,14 @@ impl DepNode { ItemSignature(ref d) => op(d).map(ItemSignature), FieldTy(ref d) => op(d).map(FieldTy), SizedConstraint(ref d) => op(d).map(SizedConstraint), - TraitItemDefIds(ref d) => op(d).map(TraitItemDefIds), + ImplOrTraitItemDefIds(ref d) => op(d).map(ImplOrTraitItemDefIds), InherentImpls(ref d) => op(d).map(InherentImpls), - ImplItems(ref d) => op(d).map(ImplItems), TraitImpls(ref d) => op(d).map(TraitImpls), TraitItems(ref d) => op(d).map(TraitItems), ReprHints(ref d) => op(d).map(ReprHints), - TraitSelect(ref d, ref type_ds) => { - let d = try_opt!(op(d)); + TraitSelect(ref type_ds) => { let type_ds = try_opt!(type_ds.iter().map(|d| op(d)).collect()); - Some(TraitSelect(d, type_ds)) + Some(TraitSelect(type_ds)) } } } diff --git a/src/librustc/dep_graph/dep_tracking_map.rs b/src/librustc/dep_graph/dep_tracking_map.rs index 88cd1efd34..51f7890c7a 100644 --- a/src/librustc/dep_graph/dep_tracking_map.rs +++ b/src/librustc/dep_graph/dep_tracking_map.rs @@ -80,6 +80,17 @@ impl DepTrackingMap { pub fn keys(&self) -> Vec { self.map.keys().cloned().collect() } + + /// Append `elem` to the vector stored for `k`, creating a new vector if needed. + /// This is considered a write to `k`. 
+ pub fn push(&mut self, k: M::Key, elem: E) + where M: DepTrackingMapConfig> + { + self.write(&k); + self.map.entry(k) + .or_insert(Vec::new()) + .push(elem); + } } impl MemoizationMap for RefCell> { diff --git a/src/librustc/dep_graph/graph.rs b/src/librustc/dep_graph/graph.rs index bb027b11b4..c42eeead69 100644 --- a/src/librustc/dep_graph/graph.rs +++ b/src/librustc/dep_graph/graph.rs @@ -46,7 +46,7 @@ impl DepGraph { data: Rc::new(DepGraphData { thread: DepGraphThreadData::new(enabled), previous_work_products: RefCell::new(FnvHashMap()), - work_products: RefCell::new(FnvHashMap()) + work_products: RefCell::new(FnvHashMap()), }) } } diff --git a/src/librustc/dep_graph/mod.rs b/src/librustc/dep_graph/mod.rs index a499cb10f2..9c00e95c17 100644 --- a/src/librustc/dep_graph/mod.rs +++ b/src/librustc/dep_graph/mod.rs @@ -15,6 +15,7 @@ mod edges; mod graph; mod query; mod raii; +mod shadow; mod thread; mod visit; diff --git a/src/librustc/dep_graph/raii.rs b/src/librustc/dep_graph/raii.rs index c43d493d17..e4f572902f 100644 --- a/src/librustc/dep_graph/raii.rs +++ b/src/librustc/dep_graph/raii.rs @@ -47,3 +47,4 @@ impl<'graph> Drop for IgnoreTask<'graph> { self.data.enqueue(DepMessage::PopIgnore); } } + diff --git a/src/librustc/dep_graph/shadow.rs b/src/librustc/dep_graph/shadow.rs new file mode 100644 index 0000000000..72a321425e --- /dev/null +++ b/src/librustc/dep_graph/shadow.rs @@ -0,0 +1,145 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The "Shadow Graph" is maintained on the main thread and which +//! tracks each message relating to the dep-graph and applies some +//! sanity checks as they go by. If an error results, it means you get +//! a nice stack-trace telling you precisely what caused the error. +//! +//! NOTE: This is a debugging facility which can potentially have non-trivial +//! runtime impact. Therefore, it is largely compiled out if +//! debug-assertions are not enabled. +//! +//! The basic sanity check, enabled if you have debug assertions +//! enabled, is that there is always a task (or ignore) on the stack +//! when you do read/write, and that the tasks are pushed/popped +//! according to a proper stack discipline. +//! +//! Optionally, if you specify RUST_FORBID_DEP_GRAPH_EDGE, you can +//! specify an edge filter to be applied to each edge as it is +//! created. See `./README.md` for details. 
+ +use hir::def_id::DefId; +use std::cell::{BorrowState, RefCell}; +use std::env; + +use super::DepNode; +use super::thread::DepMessage; +use super::debug::EdgeFilter; + +pub struct ShadowGraph { + // if you push None onto the stack, that corresponds to an Ignore + stack: RefCell>>>, + forbidden_edge: Option, +} + +const ENABLED: bool = cfg!(debug_assertions); + +impl ShadowGraph { + pub fn new() -> Self { + let forbidden_edge = if !ENABLED { + None + } else { + match env::var("RUST_FORBID_DEP_GRAPH_EDGE") { + Ok(s) => { + match EdgeFilter::new(&s) { + Ok(f) => Some(f), + Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err), + } + } + Err(_) => None, + } + }; + + ShadowGraph { + stack: RefCell::new(vec![]), + forbidden_edge: forbidden_edge, + } + } + + pub fn enqueue(&self, message: &DepMessage) { + if ENABLED { + match self.stack.borrow_state() { + BorrowState::Unused => {} + _ => { + // When we apply edge filters, that invokes the + // Debug trait on DefIds, which in turn reads from + // various bits of state and creates reads! Ignore + // those recursive reads. + return; + } + } + + let mut stack = self.stack.borrow_mut(); + match *message { + DepMessage::Read(ref n) => self.check_edge(Some(Some(n)), top(&stack)), + DepMessage::Write(ref n) => self.check_edge(top(&stack), Some(Some(n))), + DepMessage::PushTask(ref n) => stack.push(Some(n.clone())), + DepMessage::PushIgnore => stack.push(None), + DepMessage::PopTask(ref n) => { + match stack.pop() { + Some(Some(m)) => { + if *n != m { + bug!("stack mismatch: found {:?} expected {:?}", m, n) + } + } + Some(None) => bug!("stack mismatch: found Ignore expected {:?}", n), + None => bug!("stack mismatch: found empty stack, expected {:?}", n), + } + } + DepMessage::PopIgnore => { + match stack.pop() { + Some(Some(m)) => bug!("stack mismatch: found {:?} expected ignore", m), + Some(None) => (), + None => bug!("stack mismatch: found empty stack, expected ignore"), + } + } + DepMessage::Query => (), + } + } + } + + fn check_edge(&self, + source: Option>>, + target: Option>>) { + assert!(ENABLED); + match (source, target) { + // cannot happen, one side is always Some(Some(_)) + (None, None) => unreachable!(), + + // nothing on top of the stack + (None, Some(n)) | (Some(n), None) => bug!("read/write of {:?} but no current task", n), + + // this corresponds to an Ignore being top of the stack + (Some(None), _) | (_, Some(None)) => (), + + // a task is on top of the stack + (Some(Some(source)), Some(Some(target))) => { + if let Some(ref forbidden_edge) = self.forbidden_edge { + if forbidden_edge.test(source, target) { + bug!("forbidden edge {:?} -> {:?} created", source, target) + } + } + } + } + } +} + +// Do a little juggling: we get back a reference to an option at the +// top of the stack, convert it to an optional reference. 
+fn top<'s>(stack: &'s Vec>>) -> Option>> { + stack.last() + .map(|n: &'s Option>| -> Option<&'s DepNode> { + // (*) + // (*) type annotation just there to clarify what would + // otherwise be some *really* obscure code + n.as_ref() + }) +} diff --git a/src/librustc/dep_graph/thread.rs b/src/librustc/dep_graph/thread.rs index 4e16fae187..90c42d66b7 100644 --- a/src/librustc/dep_graph/thread.rs +++ b/src/librustc/dep_graph/thread.rs @@ -20,13 +20,13 @@ use hir::def_id::DefId; use rustc_data_structures::veccell::VecCell; -use std::cell::Cell; use std::sync::mpsc::{self, Sender, Receiver}; use std::thread; use super::DepGraphQuery; use super::DepNode; use super::edges::DepGraphEdges; +use super::shadow::ShadowGraph; #[derive(Debug)] pub enum DepMessage { @@ -42,12 +42,16 @@ pub enum DepMessage { pub struct DepGraphThreadData { enabled: bool, - // Local counter that just tracks how many tasks are pushed onto the - // stack, so that we still get an error in the case where one is - // missing. If dep-graph construction is enabled, we'd get the same - // error when processing tasks later on, but that's annoying because - // it lacks precision about the source of the error. - tasks_pushed: Cell, + // The "shadow graph" is a debugging aid. We give it each message + // in real time as it arrives and it checks for various errors + // (for example, a read/write when there is no current task; it + // can also apply user-defined filters; see `shadow` module for + // details). This only occurs if debug-assertions are enabled. + // + // Note that in some cases the same errors will occur when the + // data is processed off the main thread, but that's annoying + // because it lacks precision about the source of the error. + shadow_graph: ShadowGraph, // current buffer, where we accumulate messages messages: VecCell, @@ -76,7 +80,7 @@ impl DepGraphThreadData { DepGraphThreadData { enabled: enabled, - tasks_pushed: Cell::new(0), + shadow_graph: ShadowGraph::new(), messages: VecCell::with_capacity(INITIAL_CAPACITY), swap_in: rx2, swap_out: tx1, @@ -118,21 +122,7 @@ impl DepGraphThreadData { /// the buffer is full, this may swap.) #[inline] pub fn enqueue(&self, message: DepMessage) { - // Regardless of whether dep graph construction is enabled, we - // still want to check that we always have a valid task on the - // stack when a read/write/etc event occurs. - match message { - DepMessage::Read(_) | DepMessage::Write(_) => - if self.tasks_pushed.get() == 0 { - self.invalid_message("read/write but no current task") - }, - DepMessage::PushTask(_) | DepMessage::PushIgnore => - self.tasks_pushed.set(self.tasks_pushed.get() + 1), - DepMessage::PopTask(_) | DepMessage::PopIgnore => - self.tasks_pushed.set(self.tasks_pushed.get() - 1), - DepMessage::Query => - (), - } + self.shadow_graph.enqueue(&message); if self.enabled { self.enqueue_enabled(message); @@ -147,11 +137,6 @@ impl DepGraphThreadData { self.swap(); } } - - // Outline this too. - fn invalid_message(&self, string: &str) { - bug!("{}; see src/librustc/dep_graph/README.md for more information", string) - } } /// Definition of the depgraph thread. 
diff --git a/src/librustc/diagnostics.rs b/src/librustc/diagnostics.rs
index 07e54dc9e8..12a1a42552 100644
--- a/src/librustc/diagnostics.rs
+++ b/src/librustc/diagnostics.rs
@@ -1327,30 +1327,6 @@ let x: i32 = "I am not a number!";
 //   |
 //   type `i32` assigned to variable `x`
 ```
-
-Another situation in which this occurs is when you attempt to use the `try!`
-macro inside a function that does not return a `Result`:
-
-```compile_fail,E0308
-use std::fs::File;
-
-fn main() {
-    let mut f = try!(File::create("foo.txt"));
-}
-```
-
-This code gives an error like this:
-
-```text
-<anon>:5:8: 6:42 error: mismatched types:
- expected `()`,
-    found `core::result::Result<_, _>`
-(expected (),
-    found enum `core::result::Result`) [E0308]
-```
-
-`try!` returns a `Result`, and so the function must. But `main()` has
-`()` as its return type, hence the error.
 "##,
 
 E0309: r##"
@@ -1527,6 +1503,37 @@ fn main() {
 ```
 "##,
 
+E0478: r##"
+A lifetime bound was not satisfied.
+
+Erroneous code example:
+
+```compile_fail,E0478
+// Check that the explicit lifetime bound (`'SnowWhite`, in this example) must
+// outlive all the superbounds from the trait (`'kiss`, in this example).
+
+trait Wedding<'t>: 't { }
+
+struct Prince<'kiss, 'SnowWhite> {
+    child: Box<Wedding<'kiss> + 'SnowWhite>,
+    // error: lifetime bound not satisfied
+}
+```
+
+In this example, the `'SnowWhite` lifetime is supposed to outlive the `'kiss`
+lifetime but the declaration of the `Prince` struct doesn't enforce it. To fix
+this issue, you need to specify it:
+
+```
+trait Wedding<'t>: 't { }
+
+struct Prince<'kiss, 'SnowWhite: 'kiss> { // You say here that 'SnowWhite must
+                                          // live at least as long as 'kiss.
+    child: Box<Wedding<'kiss> + 'SnowWhite>, // And now it's all good!
+}
+```
+"##,
+
 E0496: r##"
 A lifetime name is shadowing another lifetime name. Erroneous code example:
@@ -1690,6 +1697,50 @@ fn cookie() -> ! {
 // error: definition of an unknown language item: `cookie`
 ```
 "##,
 
+E0525: r##"
+A closure was used where a closure implementing the expected trait was
+required.
+
+Erroneous code example:
+
+```compile_fail,E0525
+struct X;
+
+fn foo<T>(_: T) {}
+fn bar<T: Fn(u32)>(_: T) {}
+
+fn main() {
+    let x = X;
+    let closure = |_| foo(x); // error: expected a closure that implements
+                              // the `Fn` trait, but this closure only
+                              // implements `FnOnce`
+    bar(closure);
+}
+```
+
+In the example above, `closure` is an `FnOnce` closure whereas the `bar`
+function expects an `Fn` closure. In this case, the issue is simple to fix:
+implement the `Copy` and `Clone` traits on `struct X`, and the closure will
+then implement `Fn`:
+
+```
+#[derive(Clone, Copy)] // We implement the `Clone` and `Copy` traits.
+struct X;
+
+fn foo<T>(_: T) {}
+fn bar<T: Fn(u32)>(_: T) {}
+
+fn main() {
+    let x = X;
+    let closure = |_| foo(x);
+    bar(closure); // ok!
+}
+```
+
+To better understand how closures work in Rust, read:
+https://doc.rust-lang.org/book/closures.html
+"##,
+
 }
 
@@ -1715,7 +1766,6 @@ register_diagnostics! {
     E0475, // index of slice outside its lifetime
     E0476, // lifetime of the source pointer does not outlive lifetime bound...
     E0477, // the type `..` does not fulfill the required lifetime...
-    E0478, // lifetime bound not satisfied
     E0479, // the type `..` (provided as the value of a type parameter) is...
     E0480, // lifetime of method receiver does not outlive the method call
     E0481, // lifetime of function argument does not outlive the function call
@@ -1730,5 +1780,5 @@ register_diagnostics!
{ E0490, // a value of type `..` is borrowed for too long E0491, // in type `..`, reference has a longer lifetime than the data it... E0495, // cannot infer an appropriate lifetime due to conflicting requirements - E0525 // expected a closure that implements `..` but this closure only implements `..` + E0566 // conflicting representation hints } diff --git a/src/librustc/hir/check_attr.rs b/src/librustc/hir/check_attr.rs index a1c04dfcab..8ba52cdb64 100644 --- a/src/librustc/hir/check_attr.rs +++ b/src/librustc/hir/check_attr.rs @@ -11,7 +11,6 @@ use session::Session; use syntax::ast; -use syntax::attr::AttrMetaMethods; use syntax::visit; use syntax::visit::Visitor; @@ -19,6 +18,7 @@ use syntax::visit::Visitor; enum Target { Fn, Struct, + Union, Enum, Other, } @@ -28,6 +28,7 @@ impl Target { match item.node { ast::ItemKind::Fn(..) => Target::Fn, ast::ItemKind::Struct(..) => Target::Struct, + ast::ItemKind::Union(..) => Target::Union, ast::ItemKind::Enum(..) => Target::Enum, _ => Target::Other, } @@ -41,7 +42,9 @@ struct CheckAttrVisitor<'a> { impl<'a> CheckAttrVisitor<'a> { fn check_inline(&self, attr: &ast::Attribute, target: Target) { if target != Target::Fn { - span_err!(self.sess, attr.span, E0518, "attribute should be applied to function"); + struct_span_err!(self.sess, attr.span, E0518, "attribute should be applied to function") + .span_label(attr.span, &format!("requires a function")) + .emit(); } } @@ -52,20 +55,43 @@ impl<'a> CheckAttrVisitor<'a> { return; } }; + + let mut conflicting_reprs = 0; for word in words { - let word: &str = &word.name(); - let message = match word { + + let name = match word.name() { + Some(word) => word, + None => continue, + }; + + let (message, label) = match &*name { "C" => { - if target != Target::Struct && target != Target::Enum { - "attribute should be applied to struct or enum" + conflicting_reprs += 1; + if target != Target::Struct && + target != Target::Union && + target != Target::Enum { + ("attribute should be applied to struct, enum or union", + "a struct, enum or union") + } else { + continue + } + } + "packed" => { + // Do not increment conflicting_reprs here, because "packed" + // can be used to modify another repr hint + if target != Target::Struct && + target != Target::Union { + ("attribute should be applied to struct or union", + "a struct or union") } else { continue } } - "packed" | "simd" => { + conflicting_reprs += 1; if target != Target::Struct { - "attribute should be applied to struct" + ("attribute should be applied to struct", + "a struct") } else { continue } @@ -73,15 +99,23 @@ impl<'a> CheckAttrVisitor<'a> { "i8" | "u8" | "i16" | "u16" | "i32" | "u32" | "i64" | "u64" | "isize" | "usize" => { + conflicting_reprs += 1; if target != Target::Enum { - "attribute should be applied to enum" + ("attribute should be applied to enum", + "an enum") } else { continue } } _ => continue, }; - span_err!(self.sess, attr.span, E0517, "{}", message); + struct_span_err!(self.sess, attr.span, E0517, "{}", message) + .span_label(attr.span, &format!("requires {}", label)) + .emit(); + } + if conflicting_reprs > 1 { + span_warn!(self.sess, attr.span, E0566, + "conflicting representation hints"); } } diff --git a/src/librustc/hir/def.rs b/src/librustc/hir/def.rs index 218681efb7..dec8ea8a29 100644 --- a/src/librustc/hir/def.rs +++ b/src/librustc/hir/def.rs @@ -9,7 +9,6 @@ // except according to those terms. 
use hir::def_id::DefId; -use ty::subst::ParamSpace; use util::nodemap::NodeMap; use syntax::ast; use hir; @@ -17,23 +16,20 @@ use hir; #[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] pub enum Def { Fn(DefId), - SelfTy(Option /* trait */, Option /* impl */), + SelfTy(Option /* trait */, Option /* impl */), Mod(DefId), - ForeignMod(DefId), Static(DefId, bool /* is_mutbl */), Const(DefId), AssociatedConst(DefId), - Local(DefId, // def id of variable - ast::NodeId), // node id of variable - Variant(DefId /* enum */, DefId /* variant */), + Local(DefId), + Variant(DefId), Enum(DefId), TyAlias(DefId), - AssociatedTy(DefId /* trait */, DefId), + AssociatedTy(DefId), Trait(DefId), PrimTy(hir::PrimTy), - TyParam(ParamSpace, u32, DefId, ast::Name), + TyParam(DefId), Upvar(DefId, // def id of closed over local - ast::NodeId, // node id of closed over local usize, // index in the freevars list of the closure ast::NodeId), // expr node that creates the closure @@ -42,6 +38,7 @@ pub enum Def { // If Def::Struct lives in value namespace (e.g. tuple struct, unit struct expressions) // it denotes a constructor and its DefId refers to NodeId of the struct's constructor. Struct(DefId), + Union(DefId), Label(ast::NodeId), Method(DefId), Err, @@ -94,37 +91,20 @@ pub type DefMap = NodeMap; // within. pub type ExportMap = NodeMap>; -#[derive(Copy, Clone)] +#[derive(Copy, Clone, RustcEncodable, RustcDecodable)] pub struct Export { pub name: ast::Name, // The name of the target. pub def_id: DefId, // The definition of the target. } impl Def { - pub fn var_id(&self) -> ast::NodeId { - match *self { - Def::Local(_, id) | - Def::Upvar(_, id, _, _) => { - id - } - - Def::Fn(..) | Def::Mod(..) | Def::ForeignMod(..) | Def::Static(..) | - Def::Variant(..) | Def::Enum(..) | Def::TyAlias(..) | Def::AssociatedTy(..) | - Def::TyParam(..) | Def::Struct(..) | Def::Trait(..) | - Def::Method(..) | Def::Const(..) | Def::AssociatedConst(..) | - Def::PrimTy(..) | Def::Label(..) | Def::SelfTy(..) | Def::Err => { - bug!("attempted .var_id() on invalid {:?}", self) - } - } - } - pub fn def_id(&self) -> DefId { match *self { - Def::Fn(id) | Def::Mod(id) | Def::ForeignMod(id) | Def::Static(id, _) | - Def::Variant(_, id) | Def::Enum(id) | Def::TyAlias(id) | Def::AssociatedTy(_, id) | - Def::TyParam(_, _, id, _) | Def::Struct(id) | Def::Trait(id) | + Def::Fn(id) | Def::Mod(id) | Def::Static(id, _) | + Def::Variant(id) | Def::Enum(id) | Def::TyAlias(id) | Def::AssociatedTy(id) | + Def::TyParam(id) | Def::Struct(id) | Def::Union(id) | Def::Trait(id) | Def::Method(id) | Def::Const(id) | Def::AssociatedConst(id) | - Def::Local(id, _) | Def::Upvar(id, _, _, _) => { + Def::Local(id) | Def::Upvar(id, ..) => { id } @@ -141,13 +121,13 @@ impl Def { match *self { Def::Fn(..) => "function", Def::Mod(..) => "module", - Def::ForeignMod(..) => "foreign module", Def::Static(..) => "static", Def::Variant(..) => "variant", Def::Enum(..) => "enum", Def::TyAlias(..) => "type", Def::AssociatedTy(..) => "associated type", Def::Struct(..) => "struct", + Def::Union(..) => "union", Def::Trait(..) => "trait", Def::Method(..) => "method", Def::Const(..) => "constant", diff --git a/src/librustc/hir/def_id.rs b/src/librustc/hir/def_id.rs index a3b83ec5be..399243551d 100644 --- a/src/librustc/hir/def_id.rs +++ b/src/librustc/hir/def_id.rs @@ -8,12 +8,69 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use middle::cstore::LOCAL_CRATE; use ty; -use syntax::ast::CrateNum; + +use rustc_data_structures::indexed_vec::Idx; +use serialize::{self, Encoder, Decoder}; + use std::fmt; use std::u32; +#[derive(Clone, Copy, Eq, Ord, PartialOrd, PartialEq, Hash, Debug)] +pub struct CrateNum(u32); + +impl Idx for CrateNum { + fn new(value: usize) -> Self { + assert!(value < (u32::MAX) as usize); + CrateNum(value as u32) + } + + fn index(self) -> usize { + self.0 as usize + } +} + +/// Item definitions in the currently-compiled crate would have the CrateNum +/// LOCAL_CRATE in their DefId. +pub const LOCAL_CRATE: CrateNum = CrateNum(0); + +impl CrateNum { + pub fn new(x: usize) -> CrateNum { + assert!(x < (u32::MAX as usize)); + CrateNum(x as u32) + } + + pub fn from_u32(x: u32) -> CrateNum { + CrateNum(x) + } + + pub fn as_usize(&self) -> usize { + self.0 as usize + } + + pub fn as_u32(&self) -> u32 { + self.0 + } +} + +impl fmt::Display for CrateNum { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.0, f) + } +} + +impl serialize::UseSpecializedEncodable for CrateNum { + fn default_encode(&self, s: &mut S) -> Result<(), S::Error> { + s.emit_u32(self.0) + } +} + +impl serialize::UseSpecializedDecodable for CrateNum { + fn default_decode(d: &mut D) -> Result { + d.read_u32().map(CrateNum) + } +} + /// A DefIndex is an index into the hir-map for a crate, identifying a /// particular definition. It should really be considered an interned /// shorthand for a particular DefPath. @@ -46,8 +103,7 @@ pub const CRATE_DEF_INDEX: DefIndex = DefIndex(0); /// A DefId identifies a particular *definition*, by combining a crate /// index and a def index. -#[derive(Clone, Eq, Ord, PartialOrd, PartialEq, RustcEncodable, - RustcDecodable, Hash, Copy)] +#[derive(Clone, Eq, Ord, PartialOrd, PartialEq, RustcEncodable, RustcDecodable, Hash, Copy)] pub struct DefId { pub krate: CrateNum, pub index: DefIndex, @@ -58,19 +114,14 @@ impl fmt::Debug for DefId { write!(f, "DefId {{ krate: {:?}, node: {:?}", self.krate, self.index)?; - // Unfortunately, there seems to be no way to attempt to print - // a path for a def-id, so I'll just make a best effort for now - // and otherwise fallback to just printing the crate/node pair - if self.is_local() { // (1) - // (1) side-step fact that not all external things have paths at - // the moment, such as type parameters - ty::tls::with_opt(|opt_tcx| { - if let Some(tcx) = opt_tcx { - write!(f, " => {}", tcx.item_path_str(*self))?; + ty::tls::with_opt(|opt_tcx| { + if let Some(tcx) = opt_tcx { + if let Some(def_path) = tcx.opt_def_path(*self) { + write!(f, " => {}", def_path.to_string(tcx))?; } - Ok(()) - })?; - } + } + Ok(()) + })?; write!(f, " }}") } diff --git a/src/librustc/hir/fold.rs b/src/librustc/hir/fold.rs deleted file mode 100644 index 0edfd16bdf..0000000000 --- a/src/librustc/hir/fold.rs +++ /dev/null @@ -1,1115 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A Folder represents an HIR->HIR fold; it accepts a HIR piece, -//! and returns a piece of the same type. 
- -use hir::*; -use syntax::ast::{Name, NodeId, DUMMY_NODE_ID, Attribute, Attribute_, MetaItem}; -use syntax::ast::MetaItemKind; -use hir; -use syntax_pos::Span; -use syntax::codemap::{respan, Spanned}; -use syntax::ptr::P; -use syntax::parse::token::keywords; -use syntax::util::move_map::MoveMap; - -pub trait Folder : Sized { - // Any additions to this trait should happen in form - // of a call to a public `noop_*` function that only calls - // out to the folder again, not other `noop_*` functions. - // - // This is a necessary API workaround to the problem of not - // being able to call out to the super default method - // in an overridden default method. - - fn fold_crate(&mut self, c: Crate) -> Crate { - noop_fold_crate(c, self) - } - - fn fold_meta_items(&mut self, meta_items: HirVec>) -> HirVec> { - noop_fold_meta_items(meta_items, self) - } - - fn fold_meta_item(&mut self, meta_item: P) -> P { - noop_fold_meta_item(meta_item, self) - } - - fn fold_view_path(&mut self, view_path: P) -> P { - noop_fold_view_path(view_path, self) - } - - fn fold_foreign_item(&mut self, ni: ForeignItem) -> ForeignItem { - noop_fold_foreign_item(ni, self) - } - - fn fold_item(&mut self, i: Item) -> Item { - noop_fold_item(i, self) - } - - fn fold_item_id(&mut self, i: ItemId) -> ItemId { - noop_fold_item_id(i, self) - } - - fn fold_struct_field(&mut self, sf: StructField) -> StructField { - noop_fold_struct_field(sf, self) - } - - fn fold_item_underscore(&mut self, i: Item_) -> Item_ { - noop_fold_item_underscore(i, self) - } - - fn fold_trait_item(&mut self, i: TraitItem) -> TraitItem { - noop_fold_trait_item(i, self) - } - - fn fold_impl_item(&mut self, i: ImplItem) -> ImplItem { - noop_fold_impl_item(i, self) - } - - fn fold_fn_decl(&mut self, d: P) -> P { - noop_fold_fn_decl(d, self) - } - - fn fold_block(&mut self, b: P) -> P { - noop_fold_block(b, self) - } - - fn fold_stmt(&mut self, s: Stmt) -> Stmt { - noop_fold_stmt(s, self) - } - - fn fold_arm(&mut self, a: Arm) -> Arm { - noop_fold_arm(a, self) - } - - fn fold_pat(&mut self, p: P) -> P { - noop_fold_pat(p, self) - } - - fn fold_decl(&mut self, d: P) -> P { - noop_fold_decl(d, self) - } - - fn fold_expr(&mut self, e: P) -> P { - e.map(|e| noop_fold_expr(e, self)) - } - - fn fold_ty(&mut self, t: P) -> P { - noop_fold_ty(t, self) - } - - fn fold_ty_binding(&mut self, t: TypeBinding) -> TypeBinding { - noop_fold_ty_binding(t, self) - } - - fn fold_mod(&mut self, m: Mod) -> Mod { - noop_fold_mod(m, self) - } - - fn fold_foreign_mod(&mut self, nm: ForeignMod) -> ForeignMod { - noop_fold_foreign_mod(nm, self) - } - - fn fold_variant(&mut self, v: Variant) -> Variant { - noop_fold_variant(v, self) - } - - fn fold_name(&mut self, n: Name) -> Name { - noop_fold_name(n, self) - } - - fn fold_usize(&mut self, i: usize) -> usize { - noop_fold_usize(i, self) - } - - fn fold_path(&mut self, p: Path) -> Path { - noop_fold_path(p, self) - } - - fn fold_path_parameters(&mut self, p: PathParameters) -> PathParameters { - noop_fold_path_parameters(p, self) - } - - fn fold_angle_bracketed_parameter_data(&mut self, - p: AngleBracketedParameterData) - -> AngleBracketedParameterData { - noop_fold_angle_bracketed_parameter_data(p, self) - } - - fn fold_parenthesized_parameter_data(&mut self, - p: ParenthesizedParameterData) - -> ParenthesizedParameterData { - noop_fold_parenthesized_parameter_data(p, self) - } - - fn fold_local(&mut self, l: P) -> P { - noop_fold_local(l, self) - } - - fn fold_lifetime(&mut self, l: Lifetime) -> Lifetime { - noop_fold_lifetime(l, 
self) - } - - fn fold_lifetime_def(&mut self, l: LifetimeDef) -> LifetimeDef { - noop_fold_lifetime_def(l, self) - } - - fn fold_attribute(&mut self, at: Attribute) -> Option { - noop_fold_attribute(at, self) - } - - fn fold_arg(&mut self, a: Arg) -> Arg { - noop_fold_arg(a, self) - } - - fn fold_generics(&mut self, generics: Generics) -> Generics { - noop_fold_generics(generics, self) - } - - fn fold_trait_ref(&mut self, p: TraitRef) -> TraitRef { - noop_fold_trait_ref(p, self) - } - - fn fold_poly_trait_ref(&mut self, p: PolyTraitRef) -> PolyTraitRef { - noop_fold_poly_trait_ref(p, self) - } - - fn fold_variant_data(&mut self, vdata: VariantData) -> VariantData { - noop_fold_variant_data(vdata, self) - } - - fn fold_lifetimes(&mut self, lts: HirVec) -> HirVec { - noop_fold_lifetimes(lts, self) - } - - fn fold_lifetime_defs(&mut self, lts: HirVec) -> HirVec { - noop_fold_lifetime_defs(lts, self) - } - - fn fold_ty_param(&mut self, tp: TyParam) -> TyParam { - noop_fold_ty_param(tp, self) - } - - fn fold_ty_params(&mut self, tps: HirVec) -> HirVec { - noop_fold_ty_params(tps, self) - } - - fn fold_opt_lifetime(&mut self, o_lt: Option) -> Option { - noop_fold_opt_lifetime(o_lt, self) - } - - fn fold_opt_bounds(&mut self, - b: Option) - -> Option { - noop_fold_opt_bounds(b, self) - } - - fn fold_bounds(&mut self, b: TyParamBounds) -> TyParamBounds { - noop_fold_bounds(b, self) - } - - fn fold_ty_param_bound(&mut self, tpb: TyParamBound) -> TyParamBound { - noop_fold_ty_param_bound(tpb, self) - } - - fn fold_mt(&mut self, mt: MutTy) -> MutTy { - noop_fold_mt(mt, self) - } - - fn fold_field(&mut self, field: Field) -> Field { - noop_fold_field(field, self) - } - - fn fold_where_clause(&mut self, where_clause: WhereClause) -> WhereClause { - noop_fold_where_clause(where_clause, self) - } - - fn fold_where_predicate(&mut self, where_predicate: WherePredicate) -> WherePredicate { - noop_fold_where_predicate(where_predicate, self) - } - - /// called for the `id` on each declaration - fn new_id(&mut self, i: NodeId) -> NodeId { - i - } - - /// called for ids that are references (e.g., ItemDef) - fn map_id(&mut self, i: NodeId) -> NodeId { - i - } - - fn new_span(&mut self, sp: Span) -> Span { - sp - } -} - -pub fn noop_fold_meta_items(meta_items: HirVec>, - fld: &mut T) - -> HirVec> { - meta_items.move_map(|x| fld.fold_meta_item(x)) -} - -pub fn noop_fold_view_path(view_path: P, fld: &mut T) -> P { - view_path.map(|Spanned { node, span }| { - Spanned { - node: match node { - ViewPathSimple(name, path) => { - ViewPathSimple(name, fld.fold_path(path)) - } - ViewPathGlob(path) => { - ViewPathGlob(fld.fold_path(path)) - } - ViewPathList(path, path_list_idents) => { - ViewPathList(fld.fold_path(path), - path_list_idents.move_map(|path_list_ident| { - Spanned { - node: match path_list_ident.node { - PathListIdent { id, name, rename } => PathListIdent { - id: fld.new_id(id), - name: name, - rename: rename, - }, - PathListMod { id, rename } => PathListMod { - id: fld.new_id(id), - rename: rename, - }, - }, - span: fld.new_span(path_list_ident.span), - } - })) - } - }, - span: fld.new_span(span), - } - }) -} - -pub fn fold_attrs(attrs: T, fld: &mut F) -> T - where T: Into> + From>, - F: Folder, -{ - attrs.into().move_flat_map(|x| fld.fold_attribute(x)).into() -} - -pub fn noop_fold_arm(Arm { attrs, pats, guard, body }: Arm, fld: &mut T) -> Arm { - Arm { - attrs: fold_attrs(attrs, fld), - pats: pats.move_map(|x| fld.fold_pat(x)), - guard: guard.map(|x| fld.fold_expr(x)), - body: fld.fold_expr(body), - } -} - 
-pub fn noop_fold_decl(d: P, fld: &mut T) -> P { - d.map(|Spanned { node, span }| { - match node { - DeclLocal(l) => Spanned { - node: DeclLocal(fld.fold_local(l)), - span: fld.new_span(span), - }, - DeclItem(it) => Spanned { - node: DeclItem(fld.fold_item_id(it)), - span: fld.new_span(span), - }, - } - }) -} - -pub fn noop_fold_ty_binding(b: TypeBinding, fld: &mut T) -> TypeBinding { - TypeBinding { - id: fld.new_id(b.id), - name: b.name, - ty: fld.fold_ty(b.ty), - span: fld.new_span(b.span), - } -} - -pub fn noop_fold_ty(t: P, fld: &mut T) -> P { - t.map(|Ty { id, node, span }| { - Ty { - id: fld.new_id(id), - node: match node { - TyInfer => node, - TyVec(ty) => TyVec(fld.fold_ty(ty)), - TyPtr(mt) => TyPtr(fld.fold_mt(mt)), - TyRptr(region, mt) => { - TyRptr(fld.fold_opt_lifetime(region), fld.fold_mt(mt)) - } - TyBareFn(f) => { - TyBareFn(f.map(|BareFnTy { lifetimes, unsafety, abi, decl }| { - BareFnTy { - lifetimes: fld.fold_lifetime_defs(lifetimes), - unsafety: unsafety, - abi: abi, - decl: fld.fold_fn_decl(decl), - } - })) - } - TyNever => node, - TyTup(tys) => TyTup(tys.move_map(|ty| fld.fold_ty(ty))), - TyPath(qself, path) => { - let qself = qself.map(|QSelf { ty, position }| { - QSelf { - ty: fld.fold_ty(ty), - position: position, - } - }); - TyPath(qself, fld.fold_path(path)) - } - TyObjectSum(ty, bounds) => { - TyObjectSum(fld.fold_ty(ty), fld.fold_bounds(bounds)) - } - TyFixedLengthVec(ty, e) => { - TyFixedLengthVec(fld.fold_ty(ty), fld.fold_expr(e)) - } - TyTypeof(expr) => { - TyTypeof(fld.fold_expr(expr)) - } - TyPolyTraitRef(bounds) => { - TyPolyTraitRef(bounds.move_map(|b| fld.fold_ty_param_bound(b))) - } - TyImplTrait(bounds) => { - TyImplTrait(bounds.move_map(|b| fld.fold_ty_param_bound(b))) - } - }, - span: fld.new_span(span), - } - }) -} - -pub fn noop_fold_foreign_mod(ForeignMod { abi, items }: ForeignMod, - fld: &mut T) - -> ForeignMod { - ForeignMod { - abi: abi, - items: items.move_map(|x| fld.fold_foreign_item(x)), - } -} - -pub fn noop_fold_variant(v: Variant, fld: &mut T) -> Variant { - Spanned { - node: Variant_ { - name: v.node.name, - attrs: fold_attrs(v.node.attrs, fld), - data: fld.fold_variant_data(v.node.data), - disr_expr: v.node.disr_expr.map(|e| fld.fold_expr(e)), - }, - span: fld.new_span(v.span), - } -} - -pub fn noop_fold_name(n: Name, _: &mut T) -> Name { - n -} - -pub fn noop_fold_usize(i: usize, _: &mut T) -> usize { - i -} - -pub fn noop_fold_path(Path { global, segments, span }: Path, fld: &mut T) -> Path { - Path { - global: global, - segments: segments.move_map(|PathSegment { name, parameters }| { - PathSegment { - name: fld.fold_name(name), - parameters: fld.fold_path_parameters(parameters), - } - }), - span: fld.new_span(span), - } -} - -pub fn noop_fold_path_parameters(path_parameters: PathParameters, - fld: &mut T) - -> PathParameters { - match path_parameters { - AngleBracketedParameters(data) => - AngleBracketedParameters(fld.fold_angle_bracketed_parameter_data(data)), - ParenthesizedParameters(data) => - ParenthesizedParameters(fld.fold_parenthesized_parameter_data(data)), - } -} - -pub fn noop_fold_angle_bracketed_parameter_data(data: AngleBracketedParameterData, - fld: &mut T) - -> AngleBracketedParameterData { - let AngleBracketedParameterData { lifetimes, types, bindings } = data; - AngleBracketedParameterData { - lifetimes: fld.fold_lifetimes(lifetimes), - types: types.move_map(|ty| fld.fold_ty(ty)), - bindings: bindings.move_map(|b| fld.fold_ty_binding(b)), - } -} - -pub fn noop_fold_parenthesized_parameter_data(data: 
ParenthesizedParameterData, - fld: &mut T) - -> ParenthesizedParameterData { - let ParenthesizedParameterData { inputs, output, span } = data; - ParenthesizedParameterData { - inputs: inputs.move_map(|ty| fld.fold_ty(ty)), - output: output.map(|ty| fld.fold_ty(ty)), - span: fld.new_span(span), - } -} - -pub fn noop_fold_local(l: P, fld: &mut T) -> P { - l.map(|Local { id, pat, ty, init, span, attrs }| { - Local { - id: fld.new_id(id), - ty: ty.map(|t| fld.fold_ty(t)), - pat: fld.fold_pat(pat), - init: init.map(|e| fld.fold_expr(e)), - span: fld.new_span(span), - attrs: fold_attrs(attrs, fld), - } - }) -} - -pub fn noop_fold_attribute(at: Attribute, fld: &mut T) -> Option { - let Spanned {node: Attribute_ {id, style, value, is_sugared_doc}, span} = at; - Some(Spanned { - node: Attribute_ { - id: id, - style: style, - value: fld.fold_meta_item(value), - is_sugared_doc: is_sugared_doc, - }, - span: fld.new_span(span), - }) -} - -pub fn noop_fold_meta_item(mi: P, fld: &mut T) -> P { - mi.map(|Spanned { node, span }| { - Spanned { - node: match node { - MetaItemKind::Word(id) => MetaItemKind::Word(id), - MetaItemKind::List(id, mis) => { - MetaItemKind::List(id, mis.move_map(|e| fld.fold_meta_item(e))) - } - MetaItemKind::NameValue(id, s) => MetaItemKind::NameValue(id, s), - }, - span: fld.new_span(span), - } - }) -} - -pub fn noop_fold_arg(Arg { id, pat, ty }: Arg, fld: &mut T) -> Arg { - Arg { - id: fld.new_id(id), - pat: fld.fold_pat(pat), - ty: fld.fold_ty(ty), - } -} - -pub fn noop_fold_fn_decl(decl: P, fld: &mut T) -> P { - decl.map(|FnDecl { inputs, output, variadic }| { - FnDecl { - inputs: inputs.move_map(|x| fld.fold_arg(x)), - output: match output { - Return(ty) => Return(fld.fold_ty(ty)), - DefaultReturn(span) => DefaultReturn(span), - }, - variadic: variadic, - } - }) -} - -pub fn noop_fold_ty_param_bound(tpb: TyParamBound, fld: &mut T) -> TyParamBound - where T: Folder -{ - match tpb { - TraitTyParamBound(ty, modifier) => TraitTyParamBound(fld.fold_poly_trait_ref(ty), modifier), - RegionTyParamBound(lifetime) => RegionTyParamBound(fld.fold_lifetime(lifetime)), - } -} - -pub fn noop_fold_ty_param(tp: TyParam, fld: &mut T) -> TyParam { - let TyParam {id, name, bounds, default, span} = tp; - TyParam { - id: fld.new_id(id), - name: name, - bounds: fld.fold_bounds(bounds), - default: default.map(|x| fld.fold_ty(x)), - span: span, - } -} - -pub fn noop_fold_ty_params(tps: HirVec, - fld: &mut T) - -> HirVec { - tps.move_map(|tp| fld.fold_ty_param(tp)) -} - -pub fn noop_fold_lifetime(l: Lifetime, fld: &mut T) -> Lifetime { - Lifetime { - id: fld.new_id(l.id), - name: l.name, - span: fld.new_span(l.span), - } -} - -pub fn noop_fold_lifetime_def(l: LifetimeDef, fld: &mut T) -> LifetimeDef { - LifetimeDef { - lifetime: fld.fold_lifetime(l.lifetime), - bounds: fld.fold_lifetimes(l.bounds), - } -} - -pub fn noop_fold_lifetimes(lts: HirVec, fld: &mut T) -> HirVec { - lts.move_map(|l| fld.fold_lifetime(l)) -} - -pub fn noop_fold_lifetime_defs(lts: HirVec, - fld: &mut T) - -> HirVec { - lts.move_map(|l| fld.fold_lifetime_def(l)) -} - -pub fn noop_fold_opt_lifetime(o_lt: Option, fld: &mut T) -> Option { - o_lt.map(|lt| fld.fold_lifetime(lt)) -} - -pub fn noop_fold_generics(Generics { ty_params, lifetimes, where_clause }: Generics, - fld: &mut T) - -> Generics { - Generics { - ty_params: fld.fold_ty_params(ty_params), - lifetimes: fld.fold_lifetime_defs(lifetimes), - where_clause: fld.fold_where_clause(where_clause), - } -} - -pub fn noop_fold_where_clause(WhereClause { id, predicates }: 
WhereClause, - fld: &mut T) - -> WhereClause { - WhereClause { - id: fld.new_id(id), - predicates: predicates.move_map(|predicate| fld.fold_where_predicate(predicate)), - } -} - -pub fn noop_fold_where_predicate(pred: WherePredicate, fld: &mut T) -> WherePredicate { - match pred { - hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate{bound_lifetimes, - bounded_ty, - bounds, - span}) => { - hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate { - bound_lifetimes: fld.fold_lifetime_defs(bound_lifetimes), - bounded_ty: fld.fold_ty(bounded_ty), - bounds: bounds.move_map(|x| fld.fold_ty_param_bound(x)), - span: fld.new_span(span), - }) - } - hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate{lifetime, - bounds, - span}) => { - hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate { - span: fld.new_span(span), - lifetime: fld.fold_lifetime(lifetime), - bounds: bounds.move_map(|bound| fld.fold_lifetime(bound)), - }) - } - hir::WherePredicate::EqPredicate(hir::WhereEqPredicate{id, - path, - ty, - span}) => { - hir::WherePredicate::EqPredicate(hir::WhereEqPredicate { - id: fld.new_id(id), - path: fld.fold_path(path), - ty: fld.fold_ty(ty), - span: fld.new_span(span), - }) - } - } -} - -pub fn noop_fold_variant_data(vdata: VariantData, fld: &mut T) -> VariantData { - match vdata { - VariantData::Struct(fields, id) => { - VariantData::Struct(fields.move_map(|f| fld.fold_struct_field(f)), - fld.new_id(id)) - } - VariantData::Tuple(fields, id) => { - VariantData::Tuple(fields.move_map(|f| fld.fold_struct_field(f)), - fld.new_id(id)) - } - VariantData::Unit(id) => VariantData::Unit(fld.new_id(id)), - } -} - -pub fn noop_fold_trait_ref(p: TraitRef, fld: &mut T) -> TraitRef { - let id = fld.new_id(p.ref_id); - let TraitRef { - path, - ref_id: _, - } = p; - hir::TraitRef { - path: fld.fold_path(path), - ref_id: id, - } -} - -pub fn noop_fold_poly_trait_ref(p: PolyTraitRef, fld: &mut T) -> PolyTraitRef { - hir::PolyTraitRef { - bound_lifetimes: fld.fold_lifetime_defs(p.bound_lifetimes), - trait_ref: fld.fold_trait_ref(p.trait_ref), - span: fld.new_span(p.span), - } -} - -pub fn noop_fold_struct_field(f: StructField, fld: &mut T) -> StructField { - StructField { - span: fld.new_span(f.span), - id: fld.new_id(f.id), - name: f.name, - vis: f.vis, - ty: fld.fold_ty(f.ty), - attrs: fold_attrs(f.attrs, fld), - } -} - -pub fn noop_fold_field(Field { name, expr, span }: Field, folder: &mut T) -> Field { - Field { - name: respan(folder.new_span(name.span), folder.fold_name(name.node)), - expr: folder.fold_expr(expr), - span: folder.new_span(span), - } -} - -pub fn noop_fold_mt(MutTy { ty, mutbl }: MutTy, folder: &mut T) -> MutTy { - MutTy { - ty: folder.fold_ty(ty), - mutbl: mutbl, - } -} - -pub fn noop_fold_opt_bounds(b: Option, - folder: &mut T) - -> Option { - b.map(|bounds| folder.fold_bounds(bounds)) -} - -fn noop_fold_bounds(bounds: TyParamBounds, folder: &mut T) -> TyParamBounds { - bounds.move_map(|bound| folder.fold_ty_param_bound(bound)) -} - -pub fn noop_fold_block(b: P, folder: &mut T) -> P { - b.map(|Block { id, stmts, expr, rules, span }| { - Block { - id: folder.new_id(id), - stmts: stmts.move_map(|s| folder.fold_stmt(s)), - expr: expr.map(|x| folder.fold_expr(x)), - rules: rules, - span: folder.new_span(span), - } - }) -} - -pub fn noop_fold_item_underscore(i: Item_, folder: &mut T) -> Item_ { - match i { - ItemExternCrate(string) => ItemExternCrate(string), - ItemUse(view_path) => { - ItemUse(folder.fold_view_path(view_path)) - } - ItemStatic(t, m, e) => { - 
ItemStatic(folder.fold_ty(t), m, folder.fold_expr(e)) - } - ItemConst(t, e) => { - ItemConst(folder.fold_ty(t), folder.fold_expr(e)) - } - ItemFn(decl, unsafety, constness, abi, generics, body) => { - ItemFn(folder.fold_fn_decl(decl), - unsafety, - constness, - abi, - folder.fold_generics(generics), - folder.fold_block(body)) - } - ItemMod(m) => ItemMod(folder.fold_mod(m)), - ItemForeignMod(nm) => ItemForeignMod(folder.fold_foreign_mod(nm)), - ItemTy(t, generics) => { - ItemTy(folder.fold_ty(t), folder.fold_generics(generics)) - } - ItemEnum(enum_definition, generics) => { - ItemEnum(hir::EnumDef { - variants: enum_definition.variants.move_map(|x| folder.fold_variant(x)), - }, - folder.fold_generics(generics)) - } - ItemStruct(struct_def, generics) => { - let struct_def = folder.fold_variant_data(struct_def); - ItemStruct(struct_def, folder.fold_generics(generics)) - } - ItemDefaultImpl(unsafety, ref trait_ref) => { - ItemDefaultImpl(unsafety, folder.fold_trait_ref((*trait_ref).clone())) - } - ItemImpl(unsafety, polarity, generics, ifce, ty, impl_items) => { - let new_impl_items = impl_items - .move_map(|item| folder.fold_impl_item(item)); - let ifce = match ifce { - None => None, - Some(ref trait_ref) => { - Some(folder.fold_trait_ref((*trait_ref).clone())) - } - }; - ItemImpl(unsafety, - polarity, - folder.fold_generics(generics), - ifce, - folder.fold_ty(ty), - new_impl_items) - } - ItemTrait(unsafety, generics, bounds, items) => { - let bounds = folder.fold_bounds(bounds); - let items = items.move_map(|item| folder.fold_trait_item(item)); - ItemTrait(unsafety, folder.fold_generics(generics), bounds, items) - } - } -} - -pub fn noop_fold_trait_item(i: TraitItem, - folder: &mut T) - -> TraitItem { - TraitItem { - id: folder.new_id(i.id), - name: folder.fold_name(i.name), - attrs: fold_attrs(i.attrs, folder), - node: match i.node { - ConstTraitItem(ty, default) => { - ConstTraitItem(folder.fold_ty(ty), default.map(|x| folder.fold_expr(x))) - } - MethodTraitItem(sig, body) => { - MethodTraitItem(noop_fold_method_sig(sig, folder), - body.map(|x| folder.fold_block(x))) - } - TypeTraitItem(bounds, default) => { - TypeTraitItem(folder.fold_bounds(bounds), - default.map(|x| folder.fold_ty(x))) - } - }, - span: folder.new_span(i.span), - } -} - -pub fn noop_fold_impl_item(i: ImplItem, folder: &mut T) -> ImplItem { - ImplItem { - id: folder.new_id(i.id), - name: folder.fold_name(i.name), - attrs: fold_attrs(i.attrs, folder), - vis: i.vis, - defaultness: i.defaultness, - node: match i.node { - ImplItemKind::Const(ty, expr) => { - ImplItemKind::Const(folder.fold_ty(ty), folder.fold_expr(expr)) - } - ImplItemKind::Method(sig, body) => { - ImplItemKind::Method(noop_fold_method_sig(sig, folder), folder.fold_block(body)) - } - ImplItemKind::Type(ty) => ImplItemKind::Type(folder.fold_ty(ty)), - }, - span: folder.new_span(i.span), - } -} - -pub fn noop_fold_mod(Mod { inner, item_ids }: Mod, folder: &mut T) -> Mod { - Mod { - inner: folder.new_span(inner), - item_ids: item_ids.move_map(|x| folder.fold_item_id(x)), - } -} - -pub fn noop_fold_crate(Crate { module, attrs, config, span, - exported_macros, items }: Crate, - folder: &mut T) - -> Crate { - let config = folder.fold_meta_items(config); - - let crate_mod = folder.fold_item(hir::Item { - name: keywords::Invalid.name(), - attrs: attrs, - id: DUMMY_NODE_ID, - vis: hir::Public, - span: span, - node: hir::ItemMod(module), - }); - - let (module, attrs, span) = match crate_mod { - hir::Item { attrs, span, node, .. 
} => { - match node { - hir::ItemMod(m) => (m, attrs, span), - _ => panic!("fold converted a module to not a module"), - } - } - }; - - let items = items.into_iter() - .map(|(id, item)| (id, folder.fold_item(item))) - .collect(); - - Crate { - module: module, - attrs: attrs, - config: config, - span: span, - exported_macros: exported_macros, - items: items, - } -} - -pub fn noop_fold_item_id(i: ItemId, folder: &mut T) -> ItemId { - let id = folder.map_id(i.id); - ItemId { id: id } -} - -// fold one item into one item -pub fn noop_fold_item(item: Item, folder: &mut T) -> Item { - let Item { id, name, attrs, node, vis, span } = item; - let id = folder.new_id(id); - let node = folder.fold_item_underscore(node); - - Item { - id: id, - name: folder.fold_name(name), - attrs: fold_attrs(attrs, folder), - node: node, - vis: vis, - span: folder.new_span(span), - } -} - -pub fn noop_fold_foreign_item(ni: ForeignItem, folder: &mut T) -> ForeignItem { - ForeignItem { - id: folder.new_id(ni.id), - name: folder.fold_name(ni.name), - attrs: fold_attrs(ni.attrs, folder), - node: match ni.node { - ForeignItemFn(fdec, generics) => { - ForeignItemFn(folder.fold_fn_decl(fdec), folder.fold_generics(generics)) - } - ForeignItemStatic(t, m) => { - ForeignItemStatic(folder.fold_ty(t), m) - } - }, - vis: ni.vis, - span: folder.new_span(ni.span), - } -} - -pub fn noop_fold_method_sig(sig: MethodSig, folder: &mut T) -> MethodSig { - MethodSig { - generics: folder.fold_generics(sig.generics), - abi: sig.abi, - unsafety: sig.unsafety, - constness: sig.constness, - decl: folder.fold_fn_decl(sig.decl), - } -} - -pub fn noop_fold_pat(p: P, folder: &mut T) -> P { - p.map(|Pat { id, node, span }| { - Pat { - id: folder.new_id(id), - node: match node { - PatKind::Wild => PatKind::Wild, - PatKind::Binding(binding_mode, pth1, sub) => { - PatKind::Binding(binding_mode, - Spanned { - span: folder.new_span(pth1.span), - node: folder.fold_name(pth1.node), - }, - sub.map(|x| folder.fold_pat(x))) - } - PatKind::Lit(e) => PatKind::Lit(folder.fold_expr(e)), - PatKind::TupleStruct(pth, pats, ddpos) => { - PatKind::TupleStruct(folder.fold_path(pth), - pats.move_map(|x| folder.fold_pat(x)), ddpos) - } - PatKind::Path(opt_qself, pth) => { - let opt_qself = opt_qself.map(|qself| { - QSelf { ty: folder.fold_ty(qself.ty), position: qself.position } - }); - PatKind::Path(opt_qself, folder.fold_path(pth)) - } - PatKind::Struct(pth, fields, etc) => { - let pth = folder.fold_path(pth); - let fs = fields.move_map(|f| { - Spanned { - span: folder.new_span(f.span), - node: hir::FieldPat { - name: f.node.name, - pat: folder.fold_pat(f.node.pat), - is_shorthand: f.node.is_shorthand, - }, - } - }); - PatKind::Struct(pth, fs, etc) - } - PatKind::Tuple(elts, ddpos) => { - PatKind::Tuple(elts.move_map(|x| folder.fold_pat(x)), ddpos) - } - PatKind::Box(inner) => PatKind::Box(folder.fold_pat(inner)), - PatKind::Ref(inner, mutbl) => PatKind::Ref(folder.fold_pat(inner), mutbl), - PatKind::Range(e1, e2) => { - PatKind::Range(folder.fold_expr(e1), folder.fold_expr(e2)) - } - PatKind::Vec(before, slice, after) => { - PatKind::Vec(before.move_map(|x| folder.fold_pat(x)), - slice.map(|x| folder.fold_pat(x)), - after.move_map(|x| folder.fold_pat(x))) - } - }, - span: folder.new_span(span), - } - }) -} - -pub fn noop_fold_expr(Expr { id, node, span, attrs }: Expr, folder: &mut T) -> Expr { - Expr { - id: folder.new_id(id), - node: match node { - ExprBox(e) => { - ExprBox(folder.fold_expr(e)) - } - ExprVec(exprs) => { - ExprVec(exprs.move_map(|x| 
folder.fold_expr(x))) - } - ExprRepeat(expr, count) => { - ExprRepeat(folder.fold_expr(expr), folder.fold_expr(count)) - } - ExprTup(elts) => ExprTup(elts.move_map(|x| folder.fold_expr(x))), - ExprCall(f, args) => { - ExprCall(folder.fold_expr(f), args.move_map(|x| folder.fold_expr(x))) - } - ExprMethodCall(name, tps, args) => { - ExprMethodCall(respan(folder.new_span(name.span), folder.fold_name(name.node)), - tps.move_map(|x| folder.fold_ty(x)), - args.move_map(|x| folder.fold_expr(x))) - } - ExprBinary(binop, lhs, rhs) => { - ExprBinary(binop, folder.fold_expr(lhs), folder.fold_expr(rhs)) - } - ExprUnary(binop, ohs) => { - ExprUnary(binop, folder.fold_expr(ohs)) - } - ExprLit(l) => ExprLit(l), - ExprCast(expr, ty) => { - ExprCast(folder.fold_expr(expr), folder.fold_ty(ty)) - } - ExprType(expr, ty) => { - ExprType(folder.fold_expr(expr), folder.fold_ty(ty)) - } - ExprAddrOf(m, ohs) => ExprAddrOf(m, folder.fold_expr(ohs)), - ExprIf(cond, tr, fl) => { - ExprIf(folder.fold_expr(cond), - folder.fold_block(tr), - fl.map(|x| folder.fold_expr(x))) - } - ExprWhile(cond, body, opt_name) => { - ExprWhile(folder.fold_expr(cond), - folder.fold_block(body), - opt_name.map(|label| { - respan(folder.new_span(label.span), folder.fold_name(label.node)) - })) - } - ExprLoop(body, opt_name) => { - ExprLoop(folder.fold_block(body), - opt_name.map(|label| { - respan(folder.new_span(label.span), folder.fold_name(label.node)) - })) - } - ExprMatch(expr, arms, source) => { - ExprMatch(folder.fold_expr(expr), - arms.move_map(|x| folder.fold_arm(x)), - source) - } - ExprClosure(capture_clause, decl, body, fn_decl_span) => { - ExprClosure(capture_clause, - folder.fold_fn_decl(decl), - folder.fold_block(body), - folder.new_span(fn_decl_span)) - } - ExprBlock(blk) => ExprBlock(folder.fold_block(blk)), - ExprAssign(el, er) => { - ExprAssign(folder.fold_expr(el), folder.fold_expr(er)) - } - ExprAssignOp(op, el, er) => { - ExprAssignOp(op, folder.fold_expr(el), folder.fold_expr(er)) - } - ExprField(el, name) => { - ExprField(folder.fold_expr(el), - respan(folder.new_span(name.span), folder.fold_name(name.node))) - } - ExprTupField(el, index) => { - ExprTupField(folder.fold_expr(el), - respan(folder.new_span(index.span), folder.fold_usize(index.node))) - } - ExprIndex(el, er) => { - ExprIndex(folder.fold_expr(el), folder.fold_expr(er)) - } - ExprPath(qself, path) => { - let qself = qself.map(|QSelf { ty, position }| { - QSelf { - ty: folder.fold_ty(ty), - position: position, - } - }); - ExprPath(qself, folder.fold_path(path)) - } - ExprBreak(opt_name) => ExprBreak(opt_name.map(|label| { - respan(folder.new_span(label.span), folder.fold_name(label.node)) - })), - ExprAgain(opt_name) => ExprAgain(opt_name.map(|label| { - respan(folder.new_span(label.span), folder.fold_name(label.node)) - })), - ExprRet(e) => ExprRet(e.map(|x| folder.fold_expr(x))), - ExprInlineAsm(asm, outputs, inputs) => { - ExprInlineAsm(asm, - outputs.move_map(|x| folder.fold_expr(x)), - inputs.move_map(|x| folder.fold_expr(x))) - } - ExprStruct(path, fields, maybe_expr) => { - ExprStruct(folder.fold_path(path), - fields.move_map(|x| folder.fold_field(x)), - maybe_expr.map(|x| folder.fold_expr(x))) - } - }, - span: folder.new_span(span), - attrs: fold_attrs(attrs, folder), - } -} - -pub fn noop_fold_stmt(stmt: Stmt, folder: &mut T) -> Stmt { - let span = folder.new_span(stmt.span); - match stmt.node { - StmtDecl(d, id) => { - let id = folder.new_id(id); - Spanned { - node: StmtDecl(folder.fold_decl(d), id), - span: span - } - } - StmtExpr(e, id) => { - 
let id = folder.new_id(id); - Spanned { - node: StmtExpr(folder.fold_expr(e), id), - span: span, - } - } - StmtSemi(e, id) => { - let id = folder.new_id(id); - Spanned { - node: StmtSemi(folder.fold_expr(e), id), - span: span, - } - } - } -} diff --git a/src/librustc/hir/intravisit.rs b/src/librustc/hir/intravisit.rs index 92b9567888..726e4e53e2 100644 --- a/src/librustc/hir/intravisit.rs +++ b/src/librustc/hir/intravisit.rs @@ -49,8 +49,8 @@ pub enum FnKind<'a> { impl<'a> FnKind<'a> { pub fn attrs(&self) -> &'a [Attribute] { match *self { - FnKind::ItemFn(_, _, _, _, _, _, attrs) => attrs, - FnKind::Method(_, _, _, attrs) => attrs, + FnKind::ItemFn(.., attrs) => attrs, + FnKind::Method(.., attrs) => attrs, FnKind::Closure(attrs) => attrs, } } @@ -341,14 +341,15 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item) { visitor.visit_id(item.id); visitor.visit_trait_ref(trait_ref) } - ItemImpl(_, _, ref type_parameters, ref opt_trait_reference, ref typ, ref impl_items) => { + ItemImpl(.., ref type_parameters, ref opt_trait_reference, ref typ, ref impl_items) => { visitor.visit_id(item.id); visitor.visit_generics(type_parameters); walk_list!(visitor, visit_trait_ref, opt_trait_reference); visitor.visit_ty(typ); walk_list!(visitor, visit_impl_item, impl_items); } - ItemStruct(ref struct_definition, ref generics) => { + ItemStruct(ref struct_definition, ref generics) | + ItemUnion(ref struct_definition, ref generics) => { visitor.visit_generics(generics); visitor.visit_id(item.id); visitor.visit_variant_data(struct_definition, item.name, generics, item.id, item.span); @@ -444,12 +445,12 @@ pub fn walk_path<'v, V: Visitor<'v>>(visitor: &mut V, path: &'v Path) { } } -pub fn walk_path_list_item<'v, V: Visitor<'v>>(visitor: &mut V, - _prefix: &'v Path, - item: &'v PathListItem) { - visitor.visit_id(item.node.id()); - walk_opt_name(visitor, item.span, item.node.name()); - walk_opt_name(visitor, item.span, item.node.rename()); +pub fn walk_path_list_item<'v, V>(visitor: &mut V, _prefix: &'v Path, item: &'v PathListItem) + where V: Visitor<'v>, +{ + visitor.visit_id(item.node.id); + visitor.visit_name(item.span, item.node.name); + walk_opt_name(visitor, item.span, item.node.rename); } pub fn walk_path_segment<'v, V: Visitor<'v>>(visitor: &mut V, @@ -621,10 +622,10 @@ pub fn walk_fn_decl_nopat<'v, V: Visitor<'v>>(visitor: &mut V, function_declarat pub fn walk_fn_kind<'v, V: Visitor<'v>>(visitor: &mut V, function_kind: FnKind<'v>) { match function_kind { - FnKind::ItemFn(_, generics, _, _, _, _, _) => { + FnKind::ItemFn(_, generics, ..) => { visitor.visit_generics(generics); } - FnKind::Method(_, sig, _, _) => { + FnKind::Method(_, sig, ..) 
=> { visitor.visit_generics(&sig.generics); } FnKind::Closure(_) => {} @@ -880,8 +881,8 @@ pub struct IdRange { impl IdRange { pub fn max() -> IdRange { IdRange { - min: u32::MAX, - max: u32::MIN, + min: NodeId::from_u32(u32::MAX), + max: NodeId::from_u32(u32::MIN), } } @@ -895,7 +896,7 @@ impl IdRange { pub fn add(&mut self, id: NodeId) { self.min = cmp::min(self.min, id); - self.max = cmp::max(self.max, id + 1); + self.max = cmp::max(self.max, NodeId::from_u32(id.as_u32() + 1)); } } diff --git a/src/librustc/hir/lowering.rs b/src/librustc/hir/lowering.rs index c2b211238b..29dedeeeb0 100644 --- a/src/librustc/hir/lowering.rs +++ b/src/librustc/hir/lowering.rs @@ -61,7 +61,7 @@ use syntax_pos::Span; pub struct LoweringContext<'a> { crate_root: Option<&'static str>, // Use to assign ids to hir nodes that do not directly correspond to an ast node - sess: Option<&'a Session>, + sess: &'a Session, // As we walk the AST we must keep track of the current 'parent' def id (in // the form of a DefIndex) so that if we create a new node which introduces // a definition, then we can properly create the def id. @@ -81,21 +81,7 @@ pub trait Resolver { // We must keep the set of definitions up to date as we add nodes that weren't in the AST. // This should only return `None` during testing. - fn definitions(&mut self) -> Option<&mut Definitions>; -} - -pub struct DummyResolver; -impl Resolver for DummyResolver { - fn resolve_generated_global_path(&mut self, _path: &hir::Path, _is_value: bool) -> Def { - Def::Err - } - fn get_resolution(&mut self, _id: NodeId) -> Option { - None - } - fn record_resolution(&mut self, _id: NodeId, _def: Def) {} - fn definitions(&mut self) -> Option<&mut Definitions> { - None - } + fn definitions(&mut self) -> &mut Definitions; } pub fn lower_crate(sess: &Session, @@ -115,22 +101,13 @@ pub fn lower_crate(sess: &Session, } else { Some("std") }, - sess: Some(sess), + sess: sess, parent_def: None, resolver: resolver, }.lower_crate(krate) } impl<'a> LoweringContext<'a> { - pub fn testing_context(resolver: &'a mut Resolver) -> Self { - LoweringContext { - crate_root: None, - sess: None, - parent_def: None, - resolver: resolver, - } - } - fn lower_crate(&mut self, c: &Crate) -> hir::Crate { struct ItemLowerer<'lcx, 'interner: 'lcx> { items: BTreeMap, @@ -161,12 +138,11 @@ impl<'a> LoweringContext<'a> { } fn next_id(&self) -> NodeId { - self.sess.map(Session::next_node_id).unwrap_or(0) + self.sess.next_node_id() } fn diagnostic(&self) -> &errors::Handler { - self.sess.map(Session::diagnostic) - .unwrap_or_else(|| panic!("this lowerer cannot emit diagnostics")) + self.sess.diagnostic() } fn str_to_ident(&self, s: &'static str) -> Name { @@ -177,9 +153,9 @@ impl<'a> LoweringContext<'a> { where F: FnOnce(&mut LoweringContext) -> T { let old_def = self.parent_def; - self.parent_def = match self.resolver.definitions() { - Some(defs) => Some(defs.opt_def_index(parent_id).unwrap()), - None => old_def, + self.parent_def = { + let defs = self.resolver.definitions(); + Some(defs.opt_def_index(parent_id).unwrap()) }; let result = f(self); @@ -218,16 +194,10 @@ impl<'a> LoweringContext<'a> { fn lower_path_list_item(&mut self, path_list_ident: &PathListItem) -> hir::PathListItem { Spanned { - node: match path_list_ident.node { - PathListItemKind::Ident { id, name, rename } => hir::PathListIdent { - id: id, - name: name.name, - rename: rename.map(|x| x.name), - }, - PathListItemKind::Mod { id, rename } => hir::PathListMod { - id: id, - rename: rename.map(|x| x.name), - }, + node: 
hir::PathListItem_ { + id: path_list_ident.node.id, + name: path_list_ident.node.name.name, + rename: path_list_ident.node.rename.map(|rename| rename.name), }, span: path_list_ident.span, } @@ -421,9 +391,18 @@ impl<'a> LoweringContext<'a> { } fn lower_ty_param(&mut self, tp: &TyParam) -> hir::TyParam { + let mut name = tp.ident.name; + + // Don't expose `Self` (recovered "keyword used as ident" parse error). + // `rustc::ty` expects `Self` to be only used for a trait's `Self`. + // Instead, use gensym("Self") to create a distinct name that looks the same. + if name == token::keywords::SelfType.name() { + name = token::gensym("Self"); + } + hir::TyParam { id: tp.id, - name: tp.ident.name, + name: name, bounds: self.lower_bounds(&tp.bounds), default: tp.default.as_ref().map(|x| self.lower_ty(x)), span: tp.span, @@ -466,6 +445,7 @@ impl<'a> LoweringContext<'a> { ty_params: self.lower_ty_params(&g.ty_params), lifetimes: self.lower_lifetime_defs(&g.lifetimes), where_clause: self.lower_where_clause(&g.where_clause), + span: g.span, } } @@ -643,6 +623,10 @@ impl<'a> LoweringContext<'a> { let struct_def = self.lower_variant_data(struct_def); hir::ItemStruct(struct_def, self.lower_generics(generics)) } + ItemKind::Union(ref vdata, ref generics) => { + let vdata = self.lower_variant_data(vdata); + hir::ItemUnion(vdata, self.lower_generics(generics)) + } ItemKind::DefaultImpl(unsafety, ref trait_ref) => { hir::ItemDefaultImpl(self.lower_unsafety(unsafety), self.lower_trait_ref(trait_ref)) @@ -809,8 +793,8 @@ impl<'a> LoweringContext<'a> { } } - fn lower_constness(&mut self, c: Constness) -> hir::Constness { - match c { + fn lower_constness(&mut self, c: Spanned) -> hir::Constness { + match c.node { Constness::Const => hir::Constness::Const, Constness::NotConst => hir::Constness::NotConst, } @@ -966,7 +950,7 @@ impl<'a> LoweringContext<'a> { let inplace_finalize = ["ops", "InPlace", "finalize"]; let make_call = |this: &mut LoweringContext, p, args| { - let path = this.core_path(e.span, p); + let path = this.std_path(e.span, p); let path = this.expr_path(path, ThinVec::new()); this.expr_call(e.span, path, args) }; @@ -1159,15 +1143,13 @@ impl<'a> LoweringContext<'a> { ast_expr: &Expr, path: &[&str], fields: &[(&str, &P)]) -> P { - let strs = this.std_path(&iter::once(&"ops") - .chain(path) - .map(|s| *s) - .collect::>()); - - let structpath = this.path_global(ast_expr.span, strs); + let struct_path = this.std_path(ast_expr.span, + &iter::once(&"ops").chain(path) + .map(|s| *s) + .collect::>()); let hir_expr = if fields.len() == 0 { - this.expr_path(structpath, ast_expr.attrs.clone()) + this.expr_path(struct_path, ast_expr.attrs.clone()) } else { let fields = fields.into_iter().map(|&(s, e)| { let expr = this.lower_expr(&e); @@ -1180,7 +1162,7 @@ impl<'a> LoweringContext<'a> { }).collect(); let attrs = ast_expr.attrs.clone(); - this.expr_struct(ast_expr.span, structpath, fields, None, attrs) + this.expr_struct(ast_expr.span, struct_path, fields, None, attrs) }; this.signal_block_expr(hir_vec![], @@ -1463,11 +1445,7 @@ impl<'a> LoweringContext<'a> { // `match ::std::iter::Iterator::next(&mut iter) { ... 
}` let match_expr = { - let next_path = { - let strs = self.std_path(&["iter", "Iterator", "next"]); - - self.path_global(e.span, strs) - }; + let next_path = self.std_path(e.span, &["iter", "Iterator", "next"]); let iter = self.expr_ident(e.span, iter, iter_pat.id); let ref_mut_iter = self.expr_mut_addr_of(e.span, iter); let next_path = self.expr_path(next_path, ThinVec::new()); @@ -1494,11 +1472,8 @@ impl<'a> LoweringContext<'a> { // `match ::std::iter::IntoIterator::into_iter() { ... }` let into_iter_expr = { - let into_iter_path = { - let strs = self.std_path(&["iter", "IntoIterator", "into_iter"]); - - self.path_global(e.span, strs) - }; + let into_iter_path = self.std_path(e.span, + &["iter", "IntoIterator", "into_iter"]); let into_iter = self.expr_path(into_iter_path, ThinVec::new()); self.expr_call(e.span, into_iter, hir_vec![head]) @@ -1527,16 +1502,32 @@ impl<'a> LoweringContext<'a> { // to: // // { - // match { + // match { Carrier::translate( { } ) } { // Ok(val) => val, - // Err(err) => { - // return Err(From::from(err)) - // } + // Err(err) => { return Carrier::from_error(From::from(err)); } // } // } - // expand - let sub_expr = self.lower_expr(sub_expr); + // { Carrier::translate( { } ) } + let discr = { + // expand + let sub_expr = self.lower_expr(sub_expr); + let sub_expr = self.signal_block_expr(hir_vec![], + sub_expr, + e.span, + hir::PopUnstableBlock, + ThinVec::new()); + + let path = self.std_path(e.span, &["ops", "Carrier", "translate"]); + let path = self.expr_path(path, ThinVec::new()); + let call = self.expr_call(e.span, path, hir_vec![sub_expr]); + + self.signal_block_expr(hir_vec![], + call, + e.span, + hir::PushUnstableBlock, + ThinVec::new()) + }; // Ok(val) => val let ok_arm = { @@ -1548,32 +1539,35 @@ impl<'a> LoweringContext<'a> { self.arm(hir_vec![ok_pat], val_expr) }; - // Err(err) => return Err(From::from(err)) + // Err(err) => { return Carrier::from_error(From::from(err)); } let err_arm = { let err_ident = self.str_to_ident("err"); let err_local = self.pat_ident(e.span, err_ident); let from_expr = { - let path = self.std_path(&["convert", "From", "from"]); - let path = self.path_global(e.span, path); + let path = self.std_path(e.span, &["convert", "From", "from"]); let from = self.expr_path(path, ThinVec::new()); let err_expr = self.expr_ident(e.span, err_ident, err_local.id); self.expr_call(e.span, from, hir_vec![err_expr]) }; - let err_expr = { - let path = self.std_path(&["result", "Result", "Err"]); - let path = self.path_global(e.span, path); - let err_ctor = self.expr_path(path, ThinVec::new()); - self.expr_call(e.span, err_ctor, hir_vec![from_expr]) + let from_err_expr = { + let path = self.std_path(e.span, &["ops", "Carrier", "from_error"]); + let from_err = self.expr_path(path, ThinVec::new()); + self.expr_call(e.span, from_err, hir_vec![from_expr]) }; - let err_pat = self.pat_err(e.span, err_local); + let ret_expr = self.expr(e.span, - hir::Expr_::ExprRet(Some(err_expr)), - ThinVec::new()); - self.arm(hir_vec![err_pat], ret_expr) + hir::Expr_::ExprRet(Some(from_err_expr)), + ThinVec::new()); + let ret_stmt = self.stmt_expr(ret_expr); + let block = self.signal_block_stmt(ret_stmt, e.span, + hir::PushUnstableBlock, ThinVec::new()); + + let err_pat = self.pat_err(e.span, err_local); + self.arm(hir_vec![err_pat], block) }; - return self.expr_match(e.span, sub_expr, hir_vec![err_arm, ok_arm], + return self.expr_match(e.span, discr, hir_vec![err_arm, ok_arm], hir::MatchSource::TryDesugar); } @@ -1710,9 +1704,10 @@ impl<'a> LoweringContext<'a> { let 
expr_path = hir::ExprPath(None, self.path_ident(span, id)); let expr = self.expr(span, expr_path, ThinVec::new()); - let def = self.resolver.definitions().map(|defs| { - Def::Local(defs.local_def_id(binding), binding) - }).unwrap_or(Def::Err); + let def = { + let defs = self.resolver.definitions(); + Def::Local(defs.local_def_id(binding)) + }; self.resolver.record_resolution(expr.id, def); expr @@ -1787,6 +1782,15 @@ impl<'a> LoweringContext<'a> { (respan(sp, hir::StmtDecl(P(decl), self.next_id())), pat_id) } + // Turns `` into `;`, note that this produces a StmtSemi, not a + // StmtExpr. + fn stmt_expr(&self, expr: P) -> hir::Stmt { + hir::Stmt { + span: expr.span, + node: hir::StmtSemi(expr, self.next_id()), + } + } + fn block_expr(&mut self, expr: P) -> P { self.block_all(expr.span, hir::HirVec::new(), Some(expr)) } @@ -1803,26 +1807,22 @@ impl<'a> LoweringContext<'a> { } fn pat_ok(&mut self, span: Span, pat: P) -> P { - let ok = self.std_path(&["result", "Result", "Ok"]); - let path = self.path_global(span, ok); + let path = self.std_path(span, &["result", "Result", "Ok"]); self.pat_enum(span, path, hir_vec![pat]) } fn pat_err(&mut self, span: Span, pat: P) -> P { - let err = self.std_path(&["result", "Result", "Err"]); - let path = self.path_global(span, err); + let path = self.std_path(span, &["result", "Result", "Err"]); self.pat_enum(span, path, hir_vec![pat]) } fn pat_some(&mut self, span: Span, pat: P) -> P { - let some = self.std_path(&["option", "Option", "Some"]); - let path = self.path_global(span, some); + let path = self.std_path(span, &["option", "Option", "Some"]); self.pat_enum(span, path, hir_vec![pat]) } fn pat_none(&mut self, span: Span) -> P { - let none = self.std_path(&["option", "Option", "None"]); - let path = self.path_global(span, none); + let path = self.std_path(span, &["option", "Option", "None"]); self.pat_enum(span, path, hir_vec![]) } @@ -1855,11 +1855,12 @@ impl<'a> LoweringContext<'a> { let pat = self.pat(span, pat_ident); let parent_def = self.parent_def; - let def = self.resolver.definitions().map(|defs| { + let def = { + let defs = self.resolver.definitions(); let def_path_data = DefPathData::Binding(name.as_str()); let def_index = defs.create_def_with_parent(parent_def, pat.id, def_path_data); - Def::Local(DefId::local(def_index), pat.id) - }).unwrap_or(Def::Err); + Def::Local(DefId::local(def_index)) + }; self.resolver.record_resolution(pat.id, def); pat @@ -1920,7 +1921,7 @@ impl<'a> LoweringContext<'a> { } } - fn std_path(&mut self, components: &[&str]) -> Vec { + fn std_path_components(&mut self, components: &[&str]) -> Vec { let mut v = Vec::new(); if let Some(s) = self.crate_root { v.push(token::intern(s)); @@ -1931,8 +1932,8 @@ impl<'a> LoweringContext<'a> { // Given suffix ["b","c","d"], returns path `::std::b::c::d` when // `fld.cx.use_std`, and `::core::b::c::d` otherwise. 
- fn core_path(&mut self, span: Span, components: &[&str]) -> hir::Path { - let idents = self.std_path(components); + fn std_path(&mut self, span: Span, components: &[&str]) -> hir::Path { + let idents = self.std_path_components(components); self.path_global(span, idents) } @@ -1953,4 +1954,21 @@ impl<'a> LoweringContext<'a> { }); self.expr_block(block, attrs) } + + fn signal_block_stmt(&mut self, + stmt: hir::Stmt, + span: Span, + rule: hir::BlockCheckMode, + attrs: ThinVec) + -> P { + let id = self.next_id(); + let block = P(hir::Block { + rules: rule, + span: span, + id: id, + stmts: hir_vec![stmt], + expr: None, + }); + self.expr_block(block, attrs) + } } diff --git a/src/librustc/hir/map/blocks.rs b/src/librustc/hir/map/blocks.rs index 50e8c6e7ab..4487234885 100644 --- a/src/librustc/hir/map/blocks.rs +++ b/src/librustc/hir/map/blocks.rs @@ -23,13 +23,13 @@ pub use self::Code::*; +use hir as ast; use hir::map::{self, Node}; -use syntax::abi; use hir::{Block, FnDecl}; +use hir::intravisit::FnKind; +use syntax::abi; use syntax::ast::{Attribute, Name, NodeId}; -use hir as ast; use syntax_pos::Span; -use hir::intravisit::FnKind; /// An FnLikeNode is a Node that is like a fn, in that it has a decl /// and a body (as well as a NodeId, a span, etc). diff --git a/src/librustc/hir/map/collector.rs b/src/librustc/hir/map/collector.rs index b70190181a..3d9031a136 100644 --- a/src/librustc/hir/map/collector.rs +++ b/src/librustc/hir/map/collector.rs @@ -27,6 +27,10 @@ pub struct NodeCollector<'ast> { pub map: Vec>, /// The parent of this node pub parent_node: NodeId, + /// If true, completely ignore nested items. We set this when loading + /// HIR from metadata, since in that case we only want the HIR for + /// one specific item (and not the ones nested inside of it). + pub ignore_nested_items: bool } impl<'ast> NodeCollector<'ast> { @@ -35,6 +39,7 @@ impl<'ast> NodeCollector<'ast> { krate: krate, map: vec![], parent_node: CRATE_NODE_ID, + ignore_nested_items: false }; collector.insert_entry(CRATE_NODE_ID, RootCrate); @@ -52,6 +57,7 @@ impl<'ast> NodeCollector<'ast> { krate: krate, map: map, parent_node: parent_node, + ignore_nested_items: true }; assert_eq!(parent_def_path.krate, parent_def_id.krate); @@ -63,10 +69,10 @@ impl<'ast> NodeCollector<'ast> { fn insert_entry(&mut self, id: NodeId, entry: MapEntry<'ast>) { debug!("ast_map: {:?} => {:?}", id, entry); let len = self.map.len(); - if id as usize >= len { - self.map.extend(repeat(NotPresent).take(id as usize - len + 1)); + if id.as_usize() >= len { + self.map.extend(repeat(NotPresent).take(id.as_usize() - len + 1)); } - self.map[id as usize] = entry; + self.map[id.as_usize()] = entry; } fn insert(&mut self, id: NodeId, node: Node<'ast>) { @@ -88,7 +94,9 @@ impl<'ast> Visitor<'ast> for NodeCollector<'ast> { /// their outer items. 
fn visit_nested_item(&mut self, item: ItemId) { debug!("visit_nested_item: {:?}", item); - self.visit_item(self.krate.item(item.id)) + if !self.ignore_nested_items { + self.visit_item(self.krate.item(item.id)) + } } fn visit_item(&mut self, i: &'ast Item) { @@ -109,7 +117,7 @@ impl<'ast> Visitor<'ast> for NodeCollector<'ast> { this.insert(struct_def.id(), NodeStructCtor(struct_def)); } } - ItemTrait(_, _, ref bounds, _) => { + ItemTrait(.., ref bounds, _) => { for b in bounds.iter() { if let TraitTyParamBound(ref t, TraitBoundModifier::None) = *b { this.insert(t.trait_ref.ref_id, NodeItem(i)); @@ -120,7 +128,7 @@ impl<'ast> Visitor<'ast> for NodeCollector<'ast> { match view_path.node { ViewPathList(_, ref paths) => { for path in paths { - this.insert(path.node.id(), NodeItem(i)); + this.insert(path.node.id, NodeItem(i)); } } _ => () diff --git a/src/librustc/hir/map/def_collector.rs b/src/librustc/hir/map/def_collector.rs index 752b0e9a25..b0a717e18f 100644 --- a/src/librustc/hir/map/def_collector.rs +++ b/src/librustc/hir/map/def_collector.rs @@ -30,19 +30,12 @@ pub struct DefCollector<'ast> { } impl<'ast> DefCollector<'ast> { - pub fn root(definitions: &'ast mut Definitions) -> DefCollector<'ast> { - let mut collector = DefCollector { + pub fn new(definitions: &'ast mut Definitions) -> DefCollector<'ast> { + DefCollector { hir_crate: None, definitions: definitions, parent_def: None, - }; - let root = collector.create_def_with_parent(None, CRATE_NODE_ID, DefPathData::CrateRoot); - assert_eq!(root, CRATE_DEF_INDEX); - collector.parent_def = Some(root); - - collector.create_def_with_parent(Some(CRATE_DEF_INDEX), DUMMY_NODE_ID, DefPathData::Misc); - - collector + } } pub fn extend(parent_node: NodeId, @@ -50,11 +43,7 @@ impl<'ast> DefCollector<'ast> { parent_def_id: DefId, definitions: &'ast mut Definitions) -> DefCollector<'ast> { - let mut collector = DefCollector { - hir_crate: None, - parent_def: None, - definitions: definitions, - }; + let mut collector = DefCollector::new(definitions); assert_eq!(parent_def_path.krate, parent_def_id.krate); let root_path = Box::new(InlinedRootPath { @@ -68,17 +57,21 @@ impl<'ast> DefCollector<'ast> { collector } + pub fn collect_root(&mut self) { + let root = self.create_def_with_parent(None, CRATE_NODE_ID, DefPathData::CrateRoot); + assert_eq!(root, CRATE_DEF_INDEX); + self.parent_def = Some(root); + + self.create_def_with_parent(Some(CRATE_DEF_INDEX), DUMMY_NODE_ID, DefPathData::Misc); + } + pub fn walk_item(&mut self, ii: &'ast InlinedItem, krate: &'ast hir::Crate) { self.hir_crate = Some(krate); ii.visit(self); } - fn parent_def(&self) -> Option { - self.parent_def - } - fn create_def(&mut self, node_id: NodeId, data: DefPathData) -> DefIndex { - let parent_def = self.parent_def(); + let parent_def = self.parent_def; debug!("create_def(node_id={:?}, data={:?}, parent_def={:?})", node_id, data, parent_def); self.definitions.create_def_with_parent(parent_def, node_id, data) } @@ -133,7 +126,7 @@ impl<'ast> visit::Visitor for DefCollector<'ast> { let def_data = match i.node { ItemKind::DefaultImpl(..) | ItemKind::Impl(..) => DefPathData::Impl, - ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Trait(..) | + ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) | ItemKind::Trait(..) | ItemKind::ExternCrate(..) | ItemKind::ForeignMod(..) | ItemKind::Ty(..) => DefPathData::TypeNs(i.ident.name.as_str()), ItemKind::Mod(..) 
=> DefPathData::Module(i.ident.name.as_str()), @@ -164,7 +157,7 @@ impl<'ast> visit::Visitor for DefCollector<'ast> { }); } } - ItemKind::Struct(ref struct_def, _) => { + ItemKind::Struct(ref struct_def, _) | ItemKind::Union(ref struct_def, _) => { // If this is a tuple-like struct, register the constructor. if !struct_def.is_struct() { this.create_def(struct_def.id(), @@ -285,15 +278,6 @@ impl<'ast> visit::Visitor for DefCollector<'ast> { // We walk the HIR rather than the AST when reading items from metadata. impl<'ast> intravisit::Visitor<'ast> for DefCollector<'ast> { - /// Because we want to track parent items and so forth, enable - /// deep walking so that we walk nested items in the context of - /// their outer items. - fn visit_nested_item(&mut self, item_id: hir::ItemId) { - debug!("visit_nested_item: {:?}", item_id); - let item = self.hir_crate.unwrap().item(item_id.id); - self.visit_item(item) - } - fn visit_item(&mut self, i: &'ast hir::Item) { debug!("visit_item: {:?}", i); @@ -302,9 +286,9 @@ impl<'ast> intravisit::Visitor<'ast> for DefCollector<'ast> { let def_data = match i.node { hir::ItemDefaultImpl(..) | hir::ItemImpl(..) => DefPathData::Impl, - hir::ItemEnum(..) | hir::ItemStruct(..) | hir::ItemTrait(..) | - hir::ItemExternCrate(..) | hir::ItemMod(..) | hir::ItemForeignMod(..) | - hir::ItemTy(..) => + hir::ItemEnum(..) | hir::ItemStruct(..) | hir::ItemUnion(..) | + hir::ItemTrait(..) | hir::ItemExternCrate(..) | hir::ItemMod(..) | + hir::ItemForeignMod(..) | hir::ItemTy(..) => DefPathData::TypeNs(i.name.as_str()), hir::ItemStatic(..) | hir::ItemConst(..) | hir::ItemFn(..) => DefPathData::ValueNs(i.name.as_str()), @@ -331,7 +315,8 @@ impl<'ast> intravisit::Visitor<'ast> for DefCollector<'ast> { }); } } - hir::ItemStruct(ref struct_def, _) => { + hir::ItemStruct(ref struct_def, _) | + hir::ItemUnion(ref struct_def, _) => { // If this is a tuple-like struct, register the constructor. if !struct_def.is_struct() { this.create_def(struct_def.id(), diff --git a/src/librustc/hir/map/definitions.rs b/src/librustc/hir/map/definitions.rs index 901a489728..3e77c09139 100644 --- a/src/librustc/hir/map/definitions.rs +++ b/src/librustc/hir/map/definitions.rs @@ -8,14 +8,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use middle::cstore::LOCAL_CRATE; -use hir::def_id::{DefId, DefIndex}; +use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE}; use hir::map::def_collector::DefCollector; use rustc_data_structures::fnv::FnvHashMap; use std::fmt::Write; -use std::hash::{Hash, Hasher, SipHasher}; +use std::hash::{Hash, Hasher}; +use std::collections::hash_map::DefaultHasher; use syntax::{ast, visit}; -use syntax::parse::token::InternedString; +use syntax::parse::token::{self, InternedString}; use ty::TyCtxt; use util::nodemap::NodeMap; @@ -70,7 +70,7 @@ pub struct DefPath { pub data: Vec, /// what krate root is this path relative to? 
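A small aside on the pattern syntax the collector hunks above switch to (e.g. `ItemTrait(.., ref bounds, _)` and `ItemKind::Struct(..) | ItemKind::Union(..)`): `..` skips any number of fields in a tuple-like pattern. A self-contained sketch with a made-up enum:

    // Sketch only: the `..` rest pattern used in the hunks above,
    // demonstrated on a hypothetical tuple-like enum variant.
    enum Item {
        Trait(bool, u8, Vec<String>, i32),
    }

    fn bounds_len(item: &Item) -> usize {
        match *item {
            // Skip the leading fields, bind only the two we care about.
            Item::Trait(.., ref bounds, _) => bounds.len(),
        }
    }

    fn main() {
        let it = Item::Trait(true, 0, vec!["Clone".to_string()], 7);
        assert_eq!(bounds_len(&it), 1);
    }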
- pub krate: ast::CrateNum, + pub krate: CrateNum, } impl DefPath { @@ -78,7 +78,7 @@ impl DefPath { self.krate == LOCAL_CRATE } - pub fn make(start_krate: ast::CrateNum, + pub fn make(start_krate: CrateNum, start_index: DefIndex, mut get_key: FN) -> DefPath where FN: FnMut(DefIndex) -> DefKey @@ -116,11 +116,7 @@ impl DefPath { pub fn to_string(&self, tcx: TyCtxt) -> String { let mut s = String::with_capacity(self.data.len() * 16); - if self.krate == LOCAL_CRATE { - s.push_str(&tcx.crate_name(self.krate)); - } else { - s.push_str(&tcx.sess.cstore.original_crate_name(self.krate)); - } + s.push_str(&tcx.original_crate_name(self.krate)); s.push_str("/"); s.push_str(&tcx.crate_disambiguator(self.krate)); @@ -136,13 +132,13 @@ impl DefPath { } pub fn deterministic_hash(&self, tcx: TyCtxt) -> u64 { - let mut state = SipHasher::new(); + let mut state = DefaultHasher::new(); self.deterministic_hash_to(tcx, &mut state); state.finish() } pub fn deterministic_hash_to(&self, tcx: TyCtxt, state: &mut H) { - tcx.crate_name(self.krate).hash(state); + tcx.original_crate_name(self.krate).hash(state); tcx.crate_disambiguator(self.krate).hash(state); self.data.hash(state); } @@ -230,7 +226,8 @@ impl Definitions { } pub fn collect(&mut self, krate: &ast::Crate) { - let mut def_collector = DefCollector::root(self); + let mut def_collector = DefCollector::new(self); + def_collector.collect_root(); visit::walk_crate(&mut def_collector, krate); } @@ -327,6 +324,30 @@ impl Definitions { } impl DefPathData { + pub fn get_opt_name(&self) -> Option { + use self::DefPathData::*; + match *self { + TypeNs(ref name) | + ValueNs(ref name) | + Module(ref name) | + MacroDef(ref name) | + TypeParam(ref name) | + LifetimeDef(ref name) | + EnumVariant(ref name) | + Binding(ref name) | + Field(ref name) => Some(token::intern(name)), + + Impl | + CrateRoot | + InlinedRoot(_) | + Misc | + ClosureExpr | + StructCtor | + Initializer | + ImplTrait => None + } + } + pub fn as_interned_str(&self) -> InternedString { use self::DefPathData::*; match *self { diff --git a/src/librustc/hir/map/mod.rs b/src/librustc/hir/map/mod.rs index 7e82a4a05a..b351bd427a 100644 --- a/src/librustc/hir/map/mod.rs +++ b/src/librustc/hir/map/mod.rs @@ -22,17 +22,15 @@ use middle::cstore::InlinedItem as II; use hir::def_id::{CRATE_DEF_INDEX, DefId, DefIndex}; use syntax::abi::Abi; -use syntax::ast::{self, Name, NodeId, DUMMY_NODE_ID, }; +use syntax::ast::{self, Name, NodeId, CRATE_NODE_ID}; use syntax::codemap::Spanned; use syntax_pos::Span; use hir::*; -use hir::fold::Folder; use hir::print as pprust; use arena::TypedArena; use std::cell::RefCell; -use std::cmp; use std::io; use std::mem; @@ -240,7 +238,7 @@ impl<'ast> Map<'ast> { let mut id = id0; if !self.is_inlined_node_id(id) { loop { - match map[id as usize] { + match map[id.as_usize()] { EntryItem(_, item) => { let def_id = self.local_def_id(item.id); // NB ^~~~~~~ @@ -295,7 +293,7 @@ impl<'ast> Map<'ast> { // reading from an inlined def-id is really a read out of // the metadata from which we loaded the item. 
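For reference (not diff content): the definitions.rs hunk above replaces `SipHasher` with `std::collections::hash_map::DefaultHasher` in `DefPath::deterministic_hash`. A minimal standalone use of that hasher:

    // Sketch only: hashing a value with DefaultHasher, the hasher that
    // DefPath::deterministic_hash now uses in place of SipHasher.
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    fn hash_of<T: Hash>(value: &T) -> u64 {
        let mut state = DefaultHasher::new();
        value.hash(&mut state);
        state.finish()
    }

    fn main() {
        // Equal inputs hash equally; DefaultHasher::new() uses fixed keys.
        assert_eq!(hash_of(&("librustc", 0u32)), hash_of(&("librustc", 0u32)));
    }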
loop { - match map[id as usize] { + match map[id.as_usize()] { EntryItem(p, _) | EntryForeignItem(p, _) | EntryTraitItem(p, _) | @@ -315,8 +313,7 @@ impl<'ast> Map<'ast> { RootInlinedParent(parent) => match *parent { InlinedItem::Item(def_id, _) | InlinedItem::TraitItem(def_id, _) | - InlinedItem::ImplItem(def_id, _) | - InlinedItem::Foreign(def_id, _) => + InlinedItem::ImplItem(def_id, _) => return DepNode::MetaData(def_id) }, @@ -374,7 +371,7 @@ impl<'ast> Map<'ast> { } fn find_entry(&self, id: NodeId) -> Option> { - self.map.borrow().get(id as usize).cloned() + self.map.borrow().get(id.as_usize()).cloned() } pub fn krate(&self) -> &'ast Crate { @@ -457,8 +454,8 @@ impl<'ast> Map<'ast> { let mut id = start_id; loop { let parent_node = self.get_parent_node(id); - if parent_node == 0 { - return Ok(0); + if parent_node == CRATE_NODE_ID { + return Ok(CRATE_NODE_ID); } if parent_node == id { return Err(id); @@ -569,6 +566,13 @@ impl<'ast> Map<'ast> { } } + pub fn expect_impl_item(&self, id: NodeId) -> &'ast ImplItem { + match self.find(id) { + Some(NodeImplItem(item)) => item, + _ => bug!("expected impl item, found {}", self.node_to_string(id)) + } + } + pub fn expect_trait_item(&self, id: NodeId) -> &'ast TraitItem { match self.find(id) { Some(NodeTraitItem(item)) => item, @@ -576,22 +580,24 @@ impl<'ast> Map<'ast> { } } - pub fn expect_struct(&self, id: NodeId) -> &'ast VariantData { + pub fn expect_variant_data(&self, id: NodeId) -> &'ast VariantData { match self.find(id) { Some(NodeItem(i)) => { match i.node { - ItemStruct(ref struct_def, _) => struct_def, - _ => bug!("struct ID bound to non-struct") + ItemStruct(ref struct_def, _) | + ItemUnion(ref struct_def, _) => struct_def, + _ => { + bug!("struct ID bound to non-struct {}", + self.node_to_string(id)); + } } } - Some(NodeVariant(variant)) => { - if variant.node.data.is_struct() { - &variant.node.data - } else { - bug!("struct ID bound to enum variant that isn't struct-like") - } + Some(NodeStructCtor(data)) => data, + Some(NodeVariant(variant)) => &variant.node.data, + _ => { + bug!("expected struct or variant, found {}", + self.node_to_string(id)); } - _ => bug!("expected struct, found {}", self.node_to_string(id)), } } @@ -674,7 +680,7 @@ impl<'ast> Map<'ast> { map: self, item_name: parts.last().unwrap(), in_which: &parts[..parts.len() - 1], - idx: 0, + idx: CRATE_NODE_ID, } } @@ -795,10 +801,10 @@ impl<'a, 'ast> Iterator for NodesMatchingSuffix<'a, 'ast> { fn next(&mut self) -> Option { loop { let idx = self.idx; - if idx as usize >= self.map.entry_count() { + if idx.as_usize() >= self.map.entry_count() { return None; } - self.idx += 1; + self.idx = NodeId::from_u32(self.idx.as_u32() + 1); let name = match self.map.find_entry(idx) { Some(EntryItem(_, n)) => n.name(), Some(EntryForeignItem(_, n))=> n.name(), @@ -826,57 +832,6 @@ impl Named for Variant_ { fn name(&self) -> Name { self.name } } impl Named for TraitItem { fn name(&self) -> Name { self.name } } impl Named for ImplItem { fn name(&self) -> Name { self.name } } -pub trait FoldOps { - fn new_id(&self, id: NodeId) -> NodeId { - id - } - fn new_def_id(&self, def_id: DefId) -> DefId { - def_id - } - fn new_span(&self, span: Span) -> Span { - span - } -} - -/// A Folder that updates IDs and Span's according to fold_ops. 
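An aside on the indexing change running through these map hunks: `NodeId` is treated as a newtype index, so raw `id as usize` casts become `id.as_usize()` and the iteration cursor advances via `NodeId::from_u32(id.as_u32() + 1)`. A hypothetical stand-in type showing the pattern (not the real `NodeId` from `syntax::ast`):

    // Sketch only: a newtype index with the accessors the hunks above rely on.
    #[derive(Copy, Clone, PartialEq, Eq, Debug)]
    struct NodeId(u32);

    impl NodeId {
        fn new(x: usize) -> NodeId { NodeId(x as u32) }
        fn from_u32(x: u32) -> NodeId { NodeId(x) }
        fn as_u32(self) -> u32 { self.0 }
        fn as_usize(self) -> usize { self.0 as usize }
    }

    fn main() {
        let entries = vec!["crate", "item", "expr"];
        let watermark = NodeId::new(entries.len());
        let mut id = NodeId::from_u32(0);
        while id.as_usize() < entries.len() {
            println!("{:?} -> {}", id, entries[id.as_usize()]);
            id = NodeId::from_u32(id.as_u32() + 1);
        }
        assert_eq!(id, watermark);
    }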
-pub struct IdAndSpanUpdater { - fold_ops: F, - min_id_assigned: NodeId, - max_id_assigned: NodeId, -} - -impl IdAndSpanUpdater { - pub fn new(fold_ops: F) -> IdAndSpanUpdater { - IdAndSpanUpdater { - fold_ops: fold_ops, - min_id_assigned: ::std::u32::MAX, - max_id_assigned: ::std::u32::MIN, - } - } - - pub fn id_range(&self) -> intravisit::IdRange { - intravisit::IdRange { - min: self.min_id_assigned, - max: self.max_id_assigned + 1, - } - } -} - -impl Folder for IdAndSpanUpdater { - fn new_id(&mut self, id: NodeId) -> NodeId { - let id = self.fold_ops.new_id(id); - - self.min_id_assigned = cmp::min(self.min_id_assigned, id); - self.max_id_assigned = cmp::max(self.max_id_assigned, id); - - id - } - - fn new_span(&mut self, span: Span) -> Span { - self.fold_ops.new_span(span) - } -} - pub fn map_crate<'ast>(forest: &'ast mut Forest, definitions: Definitions) -> Map<'ast> { @@ -900,7 +855,7 @@ pub fn map_crate<'ast>(forest: &'ast mut Forest, entries, vector_length, (entries as f64 / vector_length as f64) * 100.); } - let local_node_id_watermark = map.len() as NodeId; + let local_node_id_watermark = NodeId::new(map.len()); let local_def_id_watermark = definitions.len(); Map { @@ -915,36 +870,15 @@ pub fn map_crate<'ast>(forest: &'ast mut Forest, /// Used for items loaded from external crate that are being inlined into this /// crate. -pub fn map_decoded_item<'ast, F: FoldOps>(map: &Map<'ast>, - parent_def_path: DefPath, - parent_def_id: DefId, - ii: InlinedItem, - fold_ops: F) - -> &'ast InlinedItem { - let mut fld = IdAndSpanUpdater::new(fold_ops); - let ii = match ii { - II::Item(d, i) => II::Item(fld.fold_ops.new_def_id(d), - i.map(|i| fld.fold_item(i))), - II::TraitItem(d, ti) => { - II::TraitItem(fld.fold_ops.new_def_id(d), - ti.map(|ti| fld.fold_trait_item(ti))) - } - II::ImplItem(d, ii) => { - II::ImplItem(fld.fold_ops.new_def_id(d), - ii.map(|ii| fld.fold_impl_item(ii))) - } - II::Foreign(d, i) => II::Foreign(fld.fold_ops.new_def_id(d), - i.map(|i| fld.fold_foreign_item(i))) - }; +pub fn map_decoded_item<'ast>(map: &Map<'ast>, + parent_def_path: DefPath, + parent_def_id: DefId, + ii: InlinedItem, + ii_parent_id: NodeId) + -> &'ast InlinedItem { + let _ignore = map.forest.dep_graph.in_ignore(); let ii = map.forest.inlined_items.alloc(ii); - let ii_parent_id = fld.new_id(DUMMY_NODE_ID); - - // Assert that the ii_parent_id is the last NodeId in our reserved range - assert!(ii_parent_id == fld.max_id_assigned); - // Assert that we did not violate the invariant that all inlined HIR items - // have NodeIds greater than or equal to `local_node_id_watermark` - assert!(fld.min_id_assigned >= map.local_node_id_watermark); let defs = &mut *map.definitions.borrow_mut(); let mut def_collector = DefCollector::extend(ii_parent_id, @@ -1026,6 +960,7 @@ fn node_id_to_string(map: &Map, id: NodeId, include_id: bool) -> String { ItemTy(..) => "ty", ItemEnum(..) => "enum", ItemStruct(..) => "struct", + ItemUnion(..) => "union", ItemTrait(..) => "trait", ItemImpl(..) => "impl", ItemDefaultImpl(..) 
=> "default impl", diff --git a/src/librustc/hir/mod.rs b/src/librustc/hir/mod.rs index d41cdfabdf..0cfdbae1a5 100644 --- a/src/librustc/hir/mod.rs +++ b/src/librustc/hir/mod.rs @@ -20,7 +20,6 @@ pub use self::FunctionRetTy::*; pub use self::ForeignItem_::*; pub use self::Item_::*; pub use self::Mutability::*; -pub use self::PathListItem_::*; pub use self::PrimTy::*; pub use self::Stmt_::*; pub use self::TraitItem_::*; @@ -36,7 +35,7 @@ use hir::def::Def; use hir::def_id::DefId; use util::nodemap::{NodeMap, FnvHashSet}; -use syntax_pos::{BytePos, mk_sp, Span, ExpnId}; +use syntax_pos::{mk_sp, Span, ExpnId, DUMMY_SP}; use syntax::codemap::{self, respan, Spanned}; use syntax::abi::Abi; use syntax::ast::{Name, NodeId, DUMMY_NODE_ID, AsmDialect}; @@ -68,7 +67,6 @@ macro_rules! hir_vec { pub mod check_attr; pub mod def; pub mod def_id; -pub mod fold; pub mod intravisit; pub mod lowering; pub mod map; @@ -301,6 +299,7 @@ pub struct Generics { pub lifetimes: HirVec, pub ty_params: HirVec, pub where_clause: WhereClause, + pub span: Span, } impl Generics { @@ -312,6 +311,7 @@ impl Generics { id: DUMMY_NODE_ID, predicates: HirVec::new(), }, + span: DUMMY_SP, } } @@ -326,38 +326,6 @@ impl Generics { pub fn is_parameterized(&self) -> bool { self.is_lt_parameterized() || self.is_type_parameterized() } - - // Does return a span which includes lifetimes and type parameters, - // not where clause. - pub fn span(&self) -> Option { - if !self.is_parameterized() { - None - } else { - let mut span: Option = None; - for lifetime in self.lifetimes.iter() { - if let Some(ref mut span) = span { - let life_span = lifetime.lifetime.span; - span.hi = if span.hi > life_span.hi { span.hi } else { life_span.hi }; - span.lo = if span.lo < life_span.lo { span.lo } else { life_span.lo }; - } else { - span = Some(lifetime.lifetime.span.clone()); - } - } - for ty_param in self.ty_params.iter() { - if let Some(ref mut span) = span { - span.lo = if span.lo < ty_param.span.lo { span.lo } else { ty_param.span.lo }; - span.hi = if span.hi > ty_param.span.hi { span.hi } else { ty_param.span.hi }; - } else { - span = Some(ty_param.span.clone()); - } - } - if let Some(ref mut span) = span { - span.lo = span.lo - BytePos(1); - span.hi = span.hi + BytePos(1); - } - span - } - } } /// A `where` clause in a definition @@ -500,7 +468,7 @@ impl Pat { } match self.node { - PatKind::Binding(_, _, Some(ref p)) => p.walk_(it), + PatKind::Binding(.., Some(ref p)) => p.walk_(it), PatKind::Struct(_, ref fields, _) => { fields.iter().all(|field| field.node.pat.walk_(it)) } @@ -517,7 +485,7 @@ impl Pat { } PatKind::Wild | PatKind::Lit(_) | - PatKind::Range(_, _) | + PatKind::Range(..) | PatKind::Binding(..) | PatKind::Path(..) => { true @@ -1337,39 +1305,11 @@ pub struct Variant_ { pub type Variant = Spanned; #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] -pub enum PathListItem_ { - PathListIdent { - name: Name, - /// renamed in list, eg `use foo::{bar as baz};` - rename: Option, - id: NodeId, - }, - PathListMod { - /// renamed in list, eg `use foo::{self as baz};` - rename: Option, - id: NodeId, - }, -} - -impl PathListItem_ { - pub fn id(&self) -> NodeId { - match *self { - PathListIdent { id, .. } | PathListMod { id, .. } => id, - } - } - - pub fn name(&self) -> Option { - match *self { - PathListIdent { name, .. } => Some(name), - PathListMod { .. } => None, - } - } - - pub fn rename(&self) -> Option { - match *self { - PathListIdent { rename, .. } | PathListMod { rename, .. 
} => rename, - } - } +pub struct PathListItem_ { + pub name: Name, + /// renamed in list, eg `use foo::{bar as baz};` + pub rename: Option, + pub id: NodeId, } pub type PathListItem = Spanned; @@ -1542,6 +1482,8 @@ pub enum Item_ { ItemEnum(EnumDef, Generics), /// A struct definition, e.g. `struct Foo {x: A}` ItemStruct(VariantData, Generics), + /// A union definition, e.g. `union Foo {x: A, y: B}` + ItemUnion(VariantData, Generics), /// Represents a Trait Declaration ItemTrait(Unsafety, Generics, TyParamBounds, HirVec), @@ -1571,6 +1513,7 @@ impl Item_ { ItemTy(..) => "type alias", ItemEnum(..) => "enum", ItemStruct(..) => "struct", + ItemUnion(..) => "union", ItemTrait(..) => "trait", ItemImpl(..) | ItemDefaultImpl(..) => "item", @@ -1621,7 +1564,7 @@ pub type FreevarMap = NodeMap>; pub type CaptureModeMap = NodeMap; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct TraitCandidate { pub def_id: DefId, pub import_id: Option, diff --git a/src/librustc/hir/pat_util.rs b/src/librustc/hir/pat_util.rs index 593d10ef4f..dec41fdfc3 100644 --- a/src/librustc/hir/pat_util.rs +++ b/src/librustc/hir/pat_util.rs @@ -53,7 +53,7 @@ impl EnumerateAndAdjustIterator for T { pub fn pat_is_refutable(dm: &DefMap, pat: &hir::Pat) -> bool { match pat.node { - PatKind::Lit(_) | PatKind::Range(_, _) | PatKind::Path(Some(..), _) => true, + PatKind::Lit(_) | PatKind::Range(..) | PatKind::Path(Some(..), _) => true, PatKind::TupleStruct(..) | PatKind::Path(..) | PatKind::Struct(..) => { @@ -62,7 +62,7 @@ pub fn pat_is_refutable(dm: &DefMap, pat: &hir::Pat) -> bool { _ => false } } - PatKind::Vec(_, _, _) => true, + PatKind::Vec(..) => true, _ => false } } @@ -174,7 +174,7 @@ pub fn necessary_variants(dm: &DefMap, pat: &hir::Pat) -> Vec { PatKind::Path(..) | PatKind::Struct(..) => { match dm.get(&p.id) { - Some(&PathResolution { base_def: Def::Variant(_, id), .. }) => { + Some(&PathResolution { base_def: Def::Variant(id), .. }) => { variants.push(id); } _ => () diff --git a/src/librustc/hir/print.rs b/src/librustc/hir/print.rs index 66c1bc7642..eebc8fa9e5 100644 --- a/src/librustc/hir/print.rs +++ b/src/librustc/hir/print.rs @@ -523,6 +523,7 @@ impl<'a> State<'a> { id: ast::DUMMY_NODE_ID, predicates: hir::HirVec::new(), }, + span: syntax_pos::DUMMY_SP, }; self.print_ty_fn(f.abi, f.unsafety, &f.decl, None, &generics)?; } @@ -751,7 +752,10 @@ impl<'a> State<'a> { self.head(&visibility_qualified(&item.vis, "struct"))?; self.print_struct(struct_def, generics, item.name, item.span, true)?; } - + hir::ItemUnion(ref struct_def, ref generics) => { + self.head(&visibility_qualified(&item.vis, "union"))?; + self.print_struct(struct_def, generics, item.name, item.span, true)?; + } hir::ItemDefaultImpl(unsafety, ref trait_ref) => { self.head("")?; self.print_visibility(&item.vis)?; @@ -1752,9 +1756,9 @@ impl<'a> State<'a> { self.commasep(Inconsistent, &elts[ddpos..], |s, p| s.print_pat(&p))?; } } else { - try!(self.commasep(Inconsistent, &elts[..], |s, p| s.print_pat(&p))); + self.commasep(Inconsistent, &elts[..], |s, p| s.print_pat(&p))?; } - try!(self.pclose()); + self.pclose()?; } PatKind::Path(None, ref path) => { self.print_path(path, true, 0)?; @@ -2133,16 +2137,7 @@ impl<'a> State<'a> { self.print_path(path, false, 0)?; word(&mut self.s, "::{")?; } - self.commasep(Inconsistent, &segments[..], |s, w| { - match w.node { - hir::PathListIdent { name, .. } => { - s.print_name(name) - } - hir::PathListMod { .. 
} => { - word(&mut s.s, "self") - } - } - })?; + self.commasep(Inconsistent, &segments[..], |s, w| s.print_name(w.node.name))?; word(&mut self.s, "}") } } @@ -2224,6 +2219,7 @@ impl<'a> State<'a> { id: ast::DUMMY_NODE_ID, predicates: hir::HirVec::new(), }, + span: syntax_pos::DUMMY_SP, }; self.print_fn(decl, unsafety, diff --git a/src/librustc/hir/svh.rs b/src/librustc/hir/svh.rs index d4e797c9f2..ae1f9d3028 100644 --- a/src/librustc/hir/svh.rs +++ b/src/librustc/hir/svh.rs @@ -17,6 +17,7 @@ use std::fmt; use std::hash::{Hash, Hasher}; +use serialize::{Encodable, Decodable, Encoder, Decoder}; #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct Svh { @@ -51,3 +52,17 @@ impl fmt::Display for Svh { f.pad(&self.to_string()) } } + +impl Encodable for Svh { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + s.emit_u64(self.as_u64().to_le()) + } +} + +impl Decodable for Svh { + fn decode(d: &mut D) -> Result { + d.read_u64() + .map(u64::from_le) + .map(Svh::new) + } +} diff --git a/src/librustc/infer/bivariate.rs b/src/librustc/infer/bivariate.rs index 125f815fed..4acb8b807d 100644 --- a/src/librustc/infer/bivariate.rs +++ b/src/librustc/infer/bivariate.rs @@ -106,7 +106,8 @@ impl<'combine, 'infcx, 'gcx, 'tcx> TypeRelation<'infcx, 'gcx, 'tcx> } } - fn regions(&mut self, a: ty::Region, _: ty::Region) -> RelateResult<'tcx, ty::Region> { + fn regions(&mut self, a: &'tcx ty::Region, _: &'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region> { Ok(a) } diff --git a/src/librustc/infer/combine.rs b/src/librustc/infer/combine.rs index b4818f963b..5ce30484ed 100644 --- a/src/librustc/infer/combine.rs +++ b/src/librustc/infer/combine.rs @@ -329,8 +329,8 @@ impl<'cx, 'gcx, 'tcx> ty::fold::TypeFolder<'gcx, 'tcx> for Generalizer<'cx, 'gcx } } - fn fold_region(&mut self, r: ty::Region) -> ty::Region { - match r { + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { + match *r { // Never make variables for regions bound within the type itself, // nor for erased regions. ty::ReLateBound(..) | diff --git a/src/librustc/infer/equate.rs b/src/librustc/infer/equate.rs index e06f7303ac..bf247acec5 100644 --- a/src/librustc/infer/equate.rs +++ b/src/librustc/infer/equate.rs @@ -79,7 +79,8 @@ impl<'combine, 'infcx, 'gcx, 'tcx> TypeRelation<'infcx, 'gcx, 'tcx> } } - fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> { + fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region> { debug!("{}.regions({:?}, {:?})", self.tag(), a, diff --git a/src/librustc/infer/error_reporting.rs b/src/librustc/infer/error_reporting.rs index b0dec3277a..2792968d42 100644 --- a/src/librustc/infer/error_reporting.rs +++ b/src/librustc/infer/error_reporting.rs @@ -82,7 +82,6 @@ use hir::def::Def; use hir::def_id::DefId; use infer::{self, TypeOrigin}; use middle::region; -use ty::subst; use ty::{self, TyCtxt, TypeFoldable}; use ty::{Region, ReFree}; use ty::error::TypeError; @@ -100,12 +99,13 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn note_and_explain_region(self, err: &mut DiagnosticBuilder, prefix: &str, - region: ty::Region, + region: &'tcx ty::Region, suffix: &str) { fn item_scope_tag(item: &hir::Item) -> &'static str { match item.node { hir::ItemImpl(..) => "impl", hir::ItemStruct(..) => "struct", + hir::ItemUnion(..) => "union", hir::ItemEnum(..) => "enum", hir::ItemTrait(..) => "trait", hir::ItemFn(..) 
=> "function body", @@ -121,7 +121,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { Some(span)) } - let (description, span) = match region { + let (description, span) = match *region { ty::ReScope(scope) => { let new_string; let unknown_scope = || { @@ -140,9 +140,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { Some(ast_map::NodeExpr(expr)) => match expr.node { hir::ExprCall(..) => "call", hir::ExprMethodCall(..) => "method call", - hir::ExprMatch(_, _, hir::MatchSource::IfLetDesugar { .. }) => "if let", - hir::ExprMatch(_, _, hir::MatchSource::WhileLetDesugar) => "while let", - hir::ExprMatch(_, _, hir::MatchSource::ForLoopDesugar) => "for", + hir::ExprMatch(.., hir::MatchSource::IfLetDesugar { .. }) => "if let", + hir::ExprMatch(.., hir::MatchSource::WhileLetDesugar) => "while let", + hir::ExprMatch(.., hir::MatchSource::ForLoopDesugar) => "for", hir::ExprMatch(..) => "match", _ => "expression", }, @@ -406,12 +406,12 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } fn free_regions_from_same_fn<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, - sub: Region, - sup: Region) + sub: &'tcx Region, + sup: &'tcx Region) -> Option { debug!("free_regions_from_same_fn(sub={:?}, sup={:?})", sub, sup); let (scope_id, fr1, fr2) = match (sub, sup) { - (ReFree(fr1), ReFree(fr2)) => { + (&ReFree(fr1), &ReFree(fr2)) => { if fr1.scope != fr2.scope { return None } @@ -488,10 +488,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // if they are both "path types", there's a chance of ambiguity // due to different versions of the same crate match (&exp_found.expected.sty, &exp_found.found.sty) { - (&ty::TyEnum(ref exp_adt, _), &ty::TyEnum(ref found_adt, _)) | - (&ty::TyStruct(ref exp_adt, _), &ty::TyStruct(ref found_adt, _)) | - (&ty::TyEnum(ref exp_adt, _), &ty::TyStruct(ref found_adt, _)) | - (&ty::TyStruct(ref exp_adt, _), &ty::TyEnum(ref found_adt, _)) => { + (&ty::TyAdt(exp_adt, _), &ty::TyAdt(found_adt, _)) => { report_path_match(err, exp_adt.did, found_adt.did); }, _ => () @@ -524,6 +521,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { pub fn note_type_err(&self, diag: &mut DiagnosticBuilder<'tcx>, origin: TypeOrigin, + secondary_span: Option<(Span, String)>, values: Option>, terr: &TypeError<'tcx>) { @@ -549,11 +547,25 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { }; if !is_simple_error { - diag.note_expected_found(&"type", &expected, &found); + if expected == found { + if let &TypeError::Sorts(ref values) = terr { + diag.note_expected_found_extra( + &"type", &expected, &found, + &format!(" ({})", values.expected.sort_string(self.tcx)), + &format!(" ({})", values.found.sort_string(self.tcx))); + } else { + diag.note_expected_found(&"type", &expected, &found); + } + } else { + diag.note_expected_found(&"type", &expected, &found); + } } } diag.span_label(span, &terr); + if let Some((sp, msg)) = secondary_span { + diag.span_label(sp, &msg); + } self.note_error_origin(diag, &origin); self.check_and_note_conflicting_crates(diag, terr, span); @@ -570,7 +582,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.tcx.sess, trace.origin.span(), E0308, "{}", trace.origin.as_failure_str() ); - self.note_type_err(&mut diag, trace.origin, Some(trace.values), terr); + self.note_type_err(&mut diag, trace.origin, None, Some(trace.values), terr); diag } @@ -599,7 +611,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { fn report_generic_bound_failure(&self, origin: SubregionOrigin<'tcx>, bound_kind: GenericKind<'tcx>, - sub: Region) + sub: &'tcx Region) { // FIXME: it would be better to 
report the first error message // with the span of the parameter itself, rather than the span @@ -613,7 +625,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { format!("the associated type `{}`", p), }; - let mut err = match sub { + let mut err = match *sub { ty::ReFree(ty::FreeRegion {bound_region: ty::BrNamed(..), ..}) => { // Does the required lifetime have a nice name we can print? let mut err = struct_span_err!(self.tcx.sess, @@ -664,8 +676,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { fn report_concrete_failure(&self, origin: SubregionOrigin<'tcx>, - sub: Region, - sup: Region) + sub: &'tcx Region, + sup: &'tcx Region) -> DiagnosticBuilder<'tcx> { match origin { infer::Subtype(trace) => { @@ -936,9 +948,9 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { fn report_sub_sup_conflict(&self, var_origin: RegionVariableOrigin, sub_origin: SubregionOrigin<'tcx>, - sub_region: Region, + sub_region: &'tcx Region, sup_origin: SubregionOrigin<'tcx>, - sup_region: Region) { + sup_region: &'tcx Region) { let mut err = self.report_inference_failure(var_origin); self.tcx.note_and_explain_region(&mut err, @@ -1027,7 +1039,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { = node_inner.expect("expect item fn"); let rebuilder = Rebuilder::new(self.tcx, fn_decl, generics, same_regions, &life_giver); let (fn_decl, generics) = rebuilder.rebuild(); - self.give_expl_lifetime_param(err, &fn_decl, unsafety, constness, name, &generics, span); + self.give_expl_lifetime_param( + err, &fn_decl, unsafety, constness, name, &generics, span); } pub fn issue_32330_warnings(&self, span: Span, issue32330s: &[ty::Issue32330]) { @@ -1294,6 +1307,7 @@ impl<'a, 'gcx, 'tcx> Rebuilder<'a, 'gcx, 'tcx> { lifetimes: lifetimes.into(), ty_params: ty_params, where_clause: where_clause, + span: generics.span, } } @@ -1365,11 +1379,12 @@ impl<'a, 'gcx, 'tcx> Rebuilder<'a, 'gcx, 'tcx> { } hir::TyPath(ref maybe_qself, ref path) => { match self.tcx.expect_def(cur_ty.id) { - Def::Enum(did) | Def::TyAlias(did) | Def::Struct(did) => { - let generics = self.tcx.lookup_item_type(did).generics; + Def::Enum(did) | Def::TyAlias(did) | + Def::Struct(did) | Def::Union(did) => { + let generics = self.tcx.lookup_generics(did); let expected = - generics.regions.len(subst::TypeSpace) as u32; + generics.regions.len() as u32; let lifetimes = path.segments.last().unwrap().parameters.lifetimes(); let mut insert = Vec::new(); @@ -1780,7 +1795,7 @@ fn lifetimes_in_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, let method_id_opt = match tcx.map.find(parent) { Some(node) => match node { ast_map::NodeItem(item) => match item.node { - hir::ItemFn(_, _, _, _, ref gen, _) => { + hir::ItemFn(.., ref gen, _) => { taken.extend_from_slice(&gen.lifetimes); None }, @@ -1804,7 +1819,7 @@ fn lifetimes_in_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, if let Some(node) = tcx.map.find(parent) { match node { ast_map::NodeItem(item) => match item.node { - hir::ItemImpl(_, _, ref gen, _, _, _) => { + hir::ItemImpl(_, _, ref gen, ..) 
=> { taken.extend_from_slice(&gen.lifetimes); } _ => () diff --git a/src/librustc/infer/freshen.rs b/src/librustc/infer/freshen.rs index ecd9759c72..eea12b7f19 100644 --- a/src/librustc/infer/freshen.rs +++ b/src/librustc/infer/freshen.rs @@ -32,7 +32,8 @@ use ty::{self, Ty, TyCtxt, TypeFoldable}; use ty::fold::TypeFolder; -use std::collections::hash_map::{self, Entry}; +use util::nodemap::FnvHashMap; +use std::collections::hash_map::Entry; use super::InferCtxt; use super::unify_key::ToType; @@ -40,7 +41,7 @@ use super::unify_key::ToType; pub struct TypeFreshener<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, freshen_count: u32, - freshen_map: hash_map::HashMap>, + freshen_map: FnvHashMap>, } impl<'a, 'gcx, 'tcx> TypeFreshener<'a, 'gcx, 'tcx> { @@ -49,7 +50,7 @@ impl<'a, 'gcx, 'tcx> TypeFreshener<'a, 'gcx, 'tcx> { TypeFreshener { infcx: infcx, freshen_count: 0, - freshen_map: hash_map::HashMap::new(), + freshen_map: FnvHashMap(), } } @@ -83,8 +84,8 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { self.infcx.tcx } - fn fold_region(&mut self, r: ty::Region) -> ty::Region { - match r { + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { + match *r { ty::ReEarlyBound(..) | ty::ReLateBound(..) => { // leave bound regions alone @@ -99,7 +100,7 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { ty::ReEmpty | ty::ReErased => { // replace all free regions with 'erased - ty::ReErased + self.tcx().mk_region(ty::ReErased) } } } @@ -155,7 +156,7 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { ty::TyInt(..) | ty::TyUint(..) | ty::TyFloat(..) | - ty::TyEnum(..) | + ty::TyAdt(..) | ty::TyBox(..) | ty::TyStr | ty::TyError | @@ -166,7 +167,6 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyTrait(..) | - ty::TyStruct(..) | ty::TyClosure(..) | ty::TyNever | ty::TyTuple(..) | diff --git a/src/librustc/infer/glb.rs b/src/librustc/infer/glb.rs index 5dd85a31a9..a5709e1880 100644 --- a/src/librustc/infer/glb.rs +++ b/src/librustc/infer/glb.rs @@ -57,7 +57,8 @@ impl<'combine, 'infcx, 'gcx, 'tcx> TypeRelation<'infcx, 'gcx, 'tcx> lattice::super_lattice_tys(self, a, b) } - fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> { + fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region> { debug!("{}.regions({:?}, {:?})", self.tag(), a, diff --git a/src/librustc/infer/higher_ranked/mod.rs b/src/librustc/infer/higher_ranked/mod.rs index 743d6135fb..7c02de05d2 100644 --- a/src/librustc/infer/higher_ranked/mod.rs +++ b/src/librustc/infer/higher_ranked/mod.rs @@ -130,7 +130,7 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { debug!("higher_ranked_match: skol_map={:?}", skol_map); // Equate types now that bound regions have been replaced. - try!(self.equate(a_is_expected).relate(&a_match, &b_match)); + self.equate(a_is_expected).relate(&a_match, &b_match)?; // Map each skolemized region to a vector of other regions that it // must be equated with. 
(Note that this vector may include other @@ -164,7 +164,7 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { .map(|(&skol, &(br, ref regions))| { let representative = regions.iter() - .filter(|r| !skol_resolution_map.contains_key(r)) + .filter(|&&r| !skol_resolution_map.contains_key(r)) .cloned() .next() .unwrap_or_else(|| { // [1] @@ -268,9 +268,9 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { snapshot: &CombinedSnapshot, debruijn: ty::DebruijnIndex, new_vars: &[ty::RegionVid], - a_map: &FnvHashMap, - r0: ty::Region) - -> ty::Region { + a_map: &FnvHashMap, + r0: &'tcx ty::Region) + -> &'tcx ty::Region { // Regions that pre-dated the LUB computation stay as they are. if !is_var_in_set(new_vars, r0) { assert!(!r0.is_bound()); @@ -301,7 +301,7 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { debug!("generalize_region(r0={:?}): \ replacing with {:?}, tainted={:?}", r0, *a_br, tainted); - return ty::ReLateBound(debruijn, *a_br); + return infcx.tcx.mk_region(ty::ReLateBound(debruijn, *a_br)); } } @@ -364,10 +364,12 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { snapshot: &CombinedSnapshot, debruijn: ty::DebruijnIndex, new_vars: &[ty::RegionVid], - a_map: &FnvHashMap, + a_map: &FnvHashMap, a_vars: &[ty::RegionVid], b_vars: &[ty::RegionVid], - r0: ty::Region) -> ty::Region { + r0: &'tcx ty::Region) + -> &'tcx ty::Region { if !is_var_in_set(new_vars, r0) { assert!(!r0.is_bound()); return r0; @@ -419,7 +421,7 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { if a_r.is_some() && b_r.is_some() && only_new_vars { // Related to exactly one bound variable from each fn: - return rev_lookup(span, a_map, a_r.unwrap()); + return rev_lookup(infcx, span, a_map, a_r.unwrap()); } else if a_r.is_none() && b_r.is_none() { // Not related to bound variables from either fn: assert!(!r0.is_bound()); @@ -430,13 +432,14 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { } } - fn rev_lookup(span: Span, - a_map: &FnvHashMap, - r: ty::Region) -> ty::Region + fn rev_lookup<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + span: Span, + a_map: &FnvHashMap, + r: &'tcx ty::Region) -> &'tcx ty::Region { for (a_br, a_r) in a_map { if *a_r == r { - return ty::ReLateBound(ty::DebruijnIndex::new(1), *a_br); + return infcx.tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1), *a_br)); } } span_bug!( @@ -445,19 +448,21 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { r); } - fn fresh_bound_variable(infcx: &InferCtxt, debruijn: ty::DebruijnIndex) -> ty::Region { + fn fresh_bound_variable<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + debruijn: ty::DebruijnIndex) + -> &'tcx ty::Region { infcx.region_vars.new_bound(debruijn) } } } fn var_ids<'a, 'gcx, 'tcx>(fields: &CombineFields<'a, 'gcx, 'tcx>, - map: &FnvHashMap) + map: &FnvHashMap) -> Vec { map.iter() - .map(|(_, r)| match *r { + .map(|(_, &r)| match *r { ty::ReVar(r) => { r } - r => { + _ => { span_bug!( fields.trace.origin.span(), "found non-region-vid: {:?}", @@ -467,8 +472,8 @@ fn var_ids<'a, 'gcx, 'tcx>(fields: &CombineFields<'a, 'gcx, 'tcx>, .collect() } -fn is_var_in_set(new_vars: &[ty::RegionVid], r: ty::Region) -> bool { - match r { +fn is_var_in_set(new_vars: &[ty::RegionVid], r: &ty::Region) -> bool { + match *r { ty::ReVar(ref v) => new_vars.iter().any(|x| x == v), _ => false } @@ -479,13 +484,13 @@ fn fold_regions_in<'a, 'gcx, 'tcx, T, F>(tcx: TyCtxt<'a, 'gcx, 'tcx>, mut fldr: F) -> T where T: TypeFoldable<'tcx>, - F: FnMut(ty::Region, ty::DebruijnIndex) -> ty::Region, + F: FnMut(&'tcx ty::Region, 
ty::DebruijnIndex) -> &'tcx ty::Region, { tcx.fold_regions(unbound_value, &mut false, |region, current_depth| { // we should only be encountering "escaping" late-bound regions here, // because the ones at the current level should have been replaced // with fresh variables - assert!(match region { + assert!(match *region { ty::ReLateBound(..) => false, _ => true }); @@ -497,9 +502,9 @@ fn fold_regions_in<'a, 'gcx, 'tcx, T, F>(tcx: TyCtxt<'a, 'gcx, 'tcx>, impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { fn tainted_regions(&self, snapshot: &CombinedSnapshot, - r: ty::Region, + r: &'tcx ty::Region, directions: TaintDirections) - -> FnvHashSet { + -> FnvHashSet<&'tcx ty::Region> { self.region_vars.tainted(&snapshot.region_vars_snapshot, r, directions) } @@ -596,7 +601,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { pub fn skolemize_late_bound_regions(&self, binder: &ty::Binder, snapshot: &CombinedSnapshot) - -> (T, SkolemizationMap) + -> (T, SkolemizationMap<'tcx>) where T : TypeFoldable<'tcx> { let (result, map) = self.tcx.replace_late_bound_regions(binder, |br| { @@ -619,7 +624,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { pub fn leak_check(&self, overly_polymorphic: bool, span: Span, - skol_map: &SkolemizationMap, + skol_map: &SkolemizationMap<'tcx>, snapshot: &CombinedSnapshot) -> RelateResult<'tcx, ()> { @@ -673,13 +678,13 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { for &tainted_region in &incoming_taints { // Each skolemized should only be relatable to itself // or new variables: - match tainted_region { + match *tainted_region { ty::ReVar(vid) => { if new_vars.contains(&vid) { warnings.extend( match self.region_vars.var_origin(vid) { LateBoundRegion(_, - ty::BrNamed(_, _, wc), + ty::BrNamed(.., wc), _) => Some(wc), _ => None, }); @@ -742,7 +747,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// to the depth of the predicate, in this case 1, so that the final /// predicate is `for<'a> &'a int : Clone`. pub fn plug_leaks(&self, - skol_map: SkolemizationMap, + skol_map: SkolemizationMap<'tcx>, snapshot: &CombinedSnapshot, value: &T) -> T where T : TypeFoldable<'tcx> @@ -755,7 +760,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // region back to the `ty::BoundRegion` that it originally // represented. Because `leak_check` passed, we know that // these taint sets are mutually disjoint. - let inv_skol_map: FnvHashMap = + let inv_skol_map: FnvHashMap<&'tcx ty::Region, ty::BoundRegion> = skol_map .iter() .flat_map(|(&skol_br, &skol)| { @@ -794,7 +799,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // (which ought not to escape the snapshot, but we // don't check that) or itself assert!( - match r { + match *r { ty::ReVar(_) => true, ty::ReSkolemized(_, ref br1) => br == br1, _ => false, @@ -802,7 +807,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { "leak-check would have us replace {:?} with {:?}", r, br); - ty::ReLateBound(ty::DebruijnIndex::new(current_depth - 1), br.clone()) + self.tcx.mk_region(ty::ReLateBound( + ty::DebruijnIndex::new(current_depth - 1), br.clone())) } } }); @@ -826,7 +832,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// /// Note: popping also occurs implicitly as part of `leak_check`. 
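Stepping back (not diff content): across these infer hunks, regions stop being passed by value (`ty::Region`) and become interned `&'tcx ty::Region` references, created through `tcx.mk_region(...)` and inspected with `match *r`. A much-simplified, hypothetical interner showing the idea; the real one allocates into the `TyCtxt` arena rather than leaking boxes:

    // Sketch only: interning values so each distinct Region exists once and
    // is shared by reference, making equality effectively a pointer check.
    use std::collections::HashMap;

    #[derive(Clone, PartialEq, Eq, Hash, Debug)]
    enum Region {
        ReStatic,
        ReVar(u32),
    }

    struct Interner {
        interned: HashMap<Region, &'static Region>,
    }

    impl Interner {
        fn new() -> Interner {
            Interner { interned: HashMap::new() }
        }

        // Stand-in for tcx.mk_region: intern once, hand out a shared reference.
        fn mk_region(&mut self, r: Region) -> &'static Region {
            *self.interned
                .entry(r.clone())
                .or_insert_with(|| Box::leak(Box::new(r)))
        }
    }

    fn main() {
        let mut interner = Interner::new();
        let a = interner.mk_region(Region::ReVar(0));
        let b = interner.mk_region(Region::ReVar(0));
        let s = interner.mk_region(Region::ReStatic);
        assert!(std::ptr::eq(a, b));
        assert!(!std::ptr::eq(a, s));
    }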
pub fn pop_skolemized(&self, - skol_map: SkolemizationMap, + skol_map: SkolemizationMap<'tcx>, snapshot: &CombinedSnapshot) { debug!("pop_skolemized({:?})", skol_map); diff --git a/src/librustc/infer/lub.rs b/src/librustc/infer/lub.rs index ad1b32ffae..7d352be67d 100644 --- a/src/librustc/infer/lub.rs +++ b/src/librustc/infer/lub.rs @@ -57,7 +57,8 @@ impl<'combine, 'infcx, 'gcx, 'tcx> TypeRelation<'infcx, 'gcx, 'tcx> lattice::super_lattice_tys(self, a, b) } - fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> { + fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region> { debug!("{}.regions({:?}, {:?})", self.tag(), a, diff --git a/src/librustc/infer/mod.rs b/src/librustc/infer/mod.rs index 24fadc549f..4f74e765bd 100644 --- a/src/librustc/infer/mod.rs +++ b/src/librustc/infer/mod.rs @@ -25,9 +25,7 @@ use middle::mem_categorization as mc; use middle::mem_categorization::McResult; use middle::region::CodeExtent; use mir::tcx::LvalueTy; -use ty::subst; -use ty::subst::Substs; -use ty::subst::Subst; +use ty::subst::{Subst, Substs}; use ty::adjustment; use ty::{TyVid, IntVid, FloatVid}; use ty::{self, Ty, TyCtxt}; @@ -138,13 +136,6 @@ pub struct InferCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { // avoid reporting the same error twice. pub reported_trait_errors: RefCell>>, - // This is a temporary field used for toggling on normalization in the inference context, - // as we move towards the approach described here: - // https://internals.rust-lang.org/t/flattening-the-contexts-for-fun-and-profit/2293 - // At a point sometime in the future normalization will be done by the typing context - // directly. - normalize: bool, - // Sadly, the behavior of projection varies a bit depending on the // stage of compilation. The specifics are given in the // documentation for `Reveal`. @@ -179,7 +170,7 @@ pub struct InferCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { /// A map returned by `skolemize_late_bound_regions()` indicating the skolemized /// region that each late-bound region was replaced with. -pub type SkolemizationMap = FnvHashMap; +pub type SkolemizationMap<'tcx> = FnvHashMap; /// Why did we require that the two types be related? /// @@ -234,7 +225,7 @@ impl TypeOrigin { &TypeOrigin::RelateOutputImplTypes(_) | &TypeOrigin::ExprAssignable(_) => "mismatched types", &TypeOrigin::MethodCompatCheck(_) => "method not compatible with trait", - &TypeOrigin::MatchExpressionArm(_, _, source) => match source { + &TypeOrigin::MatchExpressionArm(.., source) => match source { hir::MatchSource::IfLetDesugar{..} => "`if let` arms have incompatible types", _ => "match arms have incompatible types", }, @@ -257,7 +248,7 @@ impl TypeOrigin { &TypeOrigin::RelateOutputImplTypes(_) => { "trait type parameters matches those specified on the impl" } - &TypeOrigin::MatchExpressionArm(_, _, _) => "match arms have compatible types", + &TypeOrigin::MatchExpressionArm(..) 
=> "match arms have compatible types", &TypeOrigin::IfExpression(_) => "if and else have compatible types", &TypeOrigin::IfExpressionWithNoElse(_) => "if missing an else returns ()", &TypeOrigin::RangeExpression(_) => "start and end of range have compatible types", @@ -460,7 +451,6 @@ pub struct InferCtxtBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { tables: Option>>, param_env: Option>, projection_mode: Reveal, - normalize: bool } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'gcx> { @@ -475,19 +465,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'gcx> { tables: tables.map(RefCell::new), param_env: param_env, projection_mode: projection_mode, - normalize: false - } - } - - pub fn normalizing_infer_ctxt(self, projection_mode: Reveal) - -> InferCtxtBuilder<'a, 'gcx, 'tcx> { - InferCtxtBuilder { - global_tcx: self, - arenas: ty::CtxtArenas::new(), - tables: None, - param_env: None, - projection_mode: projection_mode, - normalize: false } } @@ -508,7 +485,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'gcx> { evaluation_cache: traits::EvaluationCache::new(), projection_cache: RefCell::new(traits::ProjectionCache::new()), reported_trait_errors: RefCell::new(FnvHashSet()), - normalize: false, projection_mode: Reveal::NotSpecializable, tainted_by_errors_flag: Cell::new(false), err_count_on_creation: self.sess.err_count(), @@ -527,7 +503,6 @@ impl<'a, 'gcx, 'tcx> InferCtxtBuilder<'a, 'gcx, 'tcx> { ref tables, ref mut param_env, projection_mode, - normalize } = *self; let tables = if let Some(ref tables) = *tables { InferTables::Local(tables) @@ -549,7 +524,6 @@ impl<'a, 'gcx, 'tcx> InferCtxtBuilder<'a, 'gcx, 'tcx> { selection_cache: traits::SelectionCache::new(), evaluation_cache: traits::EvaluationCache::new(), reported_trait_errors: RefCell::new(FnvHashSet()), - normalize: normalize, projection_mode: projection_mode, tainted_by_errors_flag: Cell::new(false), err_count_on_creation: tcx.sess.err_count(), @@ -609,7 +583,8 @@ impl_trans_normalize!('gcx, ty::FnSig<'gcx>, &'gcx ty::BareFnTy<'gcx>, ty::ClosureSubsts<'gcx>, - ty::PolyTraitRef<'gcx> + ty::PolyTraitRef<'gcx>, + ty::ExistentialTraitRef<'gcx> ); impl<'gcx> TransNormalize<'gcx> for LvalueTy<'gcx> { @@ -629,6 +604,18 @@ impl<'gcx> TransNormalize<'gcx> for LvalueTy<'gcx> { // NOTE: Callable from trans only! impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { + /// Currently, higher-ranked type bounds inhibit normalization. Therefore, + /// each time we erase them in translation, we need to normalize + /// the contents. + pub fn erase_late_bound_regions_and_normalize(self, value: &ty::Binder) + -> T + where T: TransNormalize<'tcx> + { + assert!(!value.needs_subst()); + let value = self.erase_late_bound_regions(value); + self.normalize_associated_type(&value) + } + pub fn normalize_associated_type(self, value: &T) -> T where T: TransNormalize<'tcx> { @@ -685,6 +672,15 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &result) } + /// Finishes processes any obligations that remain in the + /// fulfillment context, and then returns the result with all type + /// variables removed and regions erased. Because this is intended + /// for use after type-check has completed, if any errors occur, + /// it will panic. It is used during normalization and other cases + /// where processing the obligations in `fulfill_cx` may cause + /// type inference variables that appear in `result` to be + /// unified, and hence we need to process those obligations to get + /// the complete picture of the type. 
pub fn drain_fulfillment_cx_or_panic(&self, span: Span, fulfill_cx: &mut traits::FulfillmentContext<'tcx>, @@ -694,47 +690,28 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { { debug!("drain_fulfillment_cx_or_panic()"); - let when = "resolving bounds after type-checking"; - let v = match self.drain_fulfillment_cx(fulfill_cx, result) { - Ok(v) => v, + // In principle, we only need to do this so long as `result` + // contains unbound type parameters. It could be a slight + // optimization to stop iterating early. + match fulfill_cx.select_all_or_error(self) { + Ok(()) => { } Err(errors) => { - span_bug!(span, "Encountered errors `{:?}` {}", errors, when); + span_bug!(span, "Encountered errors `{:?}` resolving bounds after type-checking", + errors); } - }; + } + + let result = self.resolve_type_vars_if_possible(result); + let result = self.tcx.erase_regions(&result); - match self.tcx.lift_to_global(&v) { - Some(v) => v, + match self.tcx.lift_to_global(&result) { + Some(result) => result, None => { - span_bug!(span, "Uninferred types/regions in `{:?}` {}", v, when); + span_bug!(span, "Uninferred types/regions in `{:?}`", result); } } } - /// Finishes processes any obligations that remain in the fulfillment - /// context, and then "freshens" and returns `result`. This is - /// primarily used during normalization and other cases where - /// processing the obligations in `fulfill_cx` may cause type - /// inference variables that appear in `result` to be unified, and - /// hence we need to process those obligations to get the complete - /// picture of the type. - pub fn drain_fulfillment_cx(&self, - fulfill_cx: &mut traits::FulfillmentContext<'tcx>, - result: &T) - -> Result>> - where T : TypeFoldable<'tcx> - { - debug!("drain_fulfillment_cx(result={:?})", - result); - - // In principle, we only need to do this so long as `result` - // contains unbound type parameters. It could be a slight - // optimization to stop iterating early. - fulfill_cx.select_all_or_error(self)?; - - let result = self.resolve_type_vars_if_possible(result); - Ok(self.tcx.erase_regions(&result)) - } - pub fn projection_mode(&self) -> Reveal { self.projection_mode } @@ -866,6 +843,33 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { result.map(move |t| InferOk { value: t, obligations: fields.obligations }) } + // Clear the "obligations in snapshot" flag, invoke the closure, + // then restore the flag to its original value. This flag is a + // debugging measure designed to detect cases where we start a + // snapshot, create type variables, register obligations involving + // those type variables in the fulfillment cx, and then have to + // unroll the snapshot, leaving "dangling type variables" behind. + // In such cases, the flag will be set by the fulfillment cx, and + // an assertion will fail when rolling the snapshot back. Very + // useful, much better than grovelling through megabytes of + // RUST_LOG output. + // + // HOWEVER, in some cases the flag is wrong. In particular, we + // sometimes create a "mini-fulfilment-cx" in which we enroll + // obligations. As long as this fulfillment cx is fully drained + // before we return, this is not a problem, as there won't be any + // escaping obligations in the main cx. In those cases, you can + // use this function. 
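The comment block above (introducing `save_and_restore_obligations_in_snapshot_flag`, added just below) describes a save/clear/restore dance around a debugging flag. A minimal generic sketch of that pattern with a plain `Cell<bool>`:

    // Sketch only: stash the flag, clear it while the closure runs,
    // then put the saved value back.
    use std::cell::Cell;

    fn with_flag_cleared<F, R>(flag: &Cell<bool>, func: F) -> R
        where F: FnOnce() -> R
    {
        let saved = flag.get();
        flag.set(false);
        let result = func();
        flag.set(saved);
        result
    }

    fn main() {
        let flag = Cell::new(true);
        let value = with_flag_cleared(&flag, || {
            assert!(!flag.get()); // cleared for the duration of the closure
            42
        });
        assert!(flag.get()); // restored afterwards
        assert_eq!(value, 42);
    }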
+ pub fn save_and_restore_obligations_in_snapshot_flag(&self, func: F) -> R + where F: FnOnce(&Self) -> R + { + let flag = self.obligations_in_snapshot.get(); + self.obligations_in_snapshot.set(false); + let result = func(self); + self.obligations_in_snapshot.set(flag); + result + } + fn start_snapshot(&self) -> CombinedSnapshot { debug!("start_snapshot()"); @@ -1125,8 +1129,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { pub fn sub_regions(&self, origin: SubregionOrigin<'tcx>, - a: ty::Region, - b: ty::Region) { + a: &'tcx ty::Region, + b: &'tcx ty::Region) { debug!("sub_regions({:?} <: {:?})", a, b); self.region_vars.make_subregion(origin, a, b); } @@ -1149,7 +1153,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { pub fn region_outlives_predicate(&self, span: Span, - predicate: &ty::PolyRegionOutlivesPredicate) + predicate: &ty::PolyRegionOutlivesPredicate<'tcx>) -> UnitResult<'tcx> { self.commit_if_ok(|snapshot| { @@ -1172,15 +1176,6 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.tcx.mk_var(self.next_ty_var_id(false)) } - pub fn next_ty_var_with_default(&self, - default: Option>) -> Ty<'tcx> { - let ty_var_id = self.type_variables - .borrow_mut() - .new_var(false, default); - - self.tcx.mk_var(ty_var_id) - } - pub fn next_diverging_ty_var(&self) -> Ty<'tcx> { self.tcx.mk_var(self.next_ty_var_id(true)) } @@ -1201,96 +1196,63 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { .new_key(None) } - pub fn next_region_var(&self, origin: RegionVariableOrigin) -> ty::Region { - ty::ReVar(self.region_vars.new_region_var(origin)) - } - - pub fn region_vars_for_defs(&self, - span: Span, - defs: &[ty::RegionParameterDef]) - -> Vec { - defs.iter() - .map(|d| self.next_region_var(EarlyBoundRegion(span, d.name))) - .collect() + pub fn next_region_var(&self, origin: RegionVariableOrigin) + -> &'tcx ty::Region { + self.tcx.mk_region(ty::ReVar(self.region_vars.new_region_var(origin))) } - // We have to take `&mut Substs` in order to provide the correct substitutions for defaults - // along the way, for this reason we don't return them. - pub fn type_vars_for_defs(&self, + /// Create a region inference variable for the given + /// region parameter definition. + pub fn region_var_for_def(&self, span: Span, - space: subst::ParamSpace, - substs: &mut Substs<'tcx>, - defs: &[ty::TypeParameterDef<'tcx>]) { - - for def in defs.iter() { - let default = def.default.map(|default| { - type_variable::Default { - ty: default.subst_spanned(self.tcx, substs, Some(span)), - origin_span: span, - def_id: def.default_def_id - } - }); - - let ty_var = self.next_ty_var_with_default(default); - substs.types.push(space, ty_var); - } - } - - /// Given a set of generics defined on a type or impl, returns a substitution mapping each - /// type/region parameter to a fresh inference variable. - pub fn fresh_substs_for_generics(&self, - span: Span, - generics: &ty::Generics<'tcx>) - -> &'tcx subst::Substs<'tcx> - { - let type_params = subst::VecPerParamSpace::empty(); - - let region_params = - generics.regions.map( - |d| self.next_region_var(EarlyBoundRegion(span, d.name))); + def: &ty::RegionParameterDef) + -> &'tcx ty::Region { + self.next_region_var(EarlyBoundRegion(span, def.name)) + } + + /// Create a type inference variable for the given + /// type parameter definition. The substitutions are + /// for actual parameters that may be referred to by + /// the default of this type parameter, if it exists. + /// E.g. 
`struct Foo(...);` when + /// used in a path such as `Foo::::new()` will + /// use an inference variable for `C` with `[T, U]` + /// as the substitutions for the default, `(T, U)`. + pub fn type_var_for_def(&self, + span: Span, + def: &ty::TypeParameterDef<'tcx>, + substs: &Substs<'tcx>) + -> Ty<'tcx> { + let default = def.default.map(|default| { + type_variable::Default { + ty: default.subst_spanned(self.tcx, substs, Some(span)), + origin_span: span, + def_id: def.default_def_id + } + }); - let mut substs = subst::Substs::new(type_params, region_params); - for space in subst::ParamSpace::all().iter() { - self.type_vars_for_defs( - span, - *space, - &mut substs, - generics.types.get_slice(*space)); - } + let ty_var_id = self.type_variables + .borrow_mut() + .new_var(false, default); - self.tcx.mk_substs(substs) + self.tcx.mk_var(ty_var_id) } - /// Given a set of generics defined on a trait, returns a substitution mapping each output - /// type/region parameter to a fresh inference variable, and mapping the self type to - /// `self_ty`. - pub fn fresh_substs_for_trait(&self, - span: Span, - generics: &ty::Generics<'tcx>, - self_ty: Ty<'tcx>) - -> subst::Substs<'tcx> - { - - assert!(generics.types.len(subst::SelfSpace) == 1); - assert!(generics.types.len(subst::FnSpace) == 0); - assert!(generics.regions.len(subst::SelfSpace) == 0); - assert!(generics.regions.len(subst::FnSpace) == 0); - - let type_params = Vec::new(); - - let region_param_defs = generics.regions.get_slice(subst::TypeSpace); - let regions = self.region_vars_for_defs(span, region_param_defs); - - let mut substs = subst::Substs::new_trait(type_params, regions, self_ty); - - let type_parameter_defs = generics.types.get_slice(subst::TypeSpace); - self.type_vars_for_defs(span, subst::TypeSpace, &mut substs, type_parameter_defs); - - return substs; + /// Given a set of generics defined on a type or impl, returns a substitution mapping each + /// type/region parameter to a fresh inference variable. 
+ pub fn fresh_substs_for_item(&self, + span: Span, + def_id: DefId) + -> &'tcx Substs<'tcx> { + Substs::for_item(self.tcx, def_id, |def, _| { + self.region_var_for_def(span, def) + }, |def, substs| { + self.type_var_for_def(span, def, substs) + }) } - pub fn fresh_bound_region(&self, debruijn: ty::DebruijnIndex) -> ty::Region { + pub fn fresh_bound_region(&self, debruijn: ty::DebruijnIndex) -> &'tcx ty::Region { self.region_vars.new_bound(debruijn) } @@ -1575,7 +1537,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { span: Span, lbrct: LateBoundRegionConversionTime, value: &ty::Binder) - -> (T, FnvHashMap) + -> (T, FnvHashMap) where T : TypeFoldable<'tcx> { self.tcx.replace_late_bound_regions( @@ -1621,8 +1583,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { pub fn verify_generic_bound(&self, origin: SubregionOrigin<'tcx>, kind: GenericKind<'tcx>, - a: ty::Region, - bound: VerifyBound) { + a: &'tcx ty::Region, + bound: VerifyBound<'tcx>) { debug!("verify_generic_bound({:?}, {:?} <: {:?})", kind, a, @@ -1711,7 +1673,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.tcx.region_maps.temporary_scope(rvalue_id) } - pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option { + pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option> { self.tables.borrow().upvar_capture_map.get(&upvar_id).cloned() } @@ -1746,17 +1708,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } let closure_ty = self.tcx.closure_type(def_id, substs); - if self.normalize { - let closure_ty = self.tcx.erase_regions(&closure_ty); - - if !closure_ty.has_projection_types() { - return closure_ty; - } - - self.normalize_projections_in(&closure_ty) - } else { - closure_ty - } + closure_ty } } @@ -1800,7 +1752,7 @@ impl TypeOrigin { TypeOrigin::ExprAssignable(span) => span, TypeOrigin::Misc(span) => span, TypeOrigin::RelateOutputImplTypes(span) => span, - TypeOrigin::MatchExpressionArm(match_span, _, _) => match_span, + TypeOrigin::MatchExpressionArm(match_span, ..) => match_span, TypeOrigin::IfExpression(span) => span, TypeOrigin::IfExpressionWithNoElse(span) => span, TypeOrigin::RangeExpression(span) => span, @@ -1853,7 +1805,7 @@ impl RegionVariableOrigin { Autoref(a) => a, Coercion(a) => a, EarlyBoundRegion(a, _) => a, - LateBoundRegion(a, _, _) => a, + LateBoundRegion(a, ..) 
=> a, BoundRegionInCoherence(_) => syntax_pos::DUMMY_SP, UpvarRegion(_, a) => a } diff --git a/src/librustc/infer/region_inference/graphviz.rs b/src/librustc/infer/region_inference/graphviz.rs index 905ad7c0fa..289f7d6c73 100644 --- a/src/librustc/infer/region_inference/graphviz.rs +++ b/src/librustc/infer/region_inference/graphviz.rs @@ -63,9 +63,8 @@ pub fn maybe_print_constraints_for<'a, 'gcx, 'tcx>( return; } - let requested_node: Option = env::var("RUST_REGION_GRAPH_NODE") - .ok() - .and_then(|s| s.parse().ok()); + let requested_node = env::var("RUST_REGION_GRAPH_NODE") + .ok().and_then(|s| s.parse().map(ast::NodeId::new).ok()); if requested_node.is_some() && requested_node != Some(subject_node) { return; @@ -123,7 +122,7 @@ pub fn maybe_print_constraints_for<'a, 'gcx, 'tcx>( struct ConstraintGraph<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { tcx: TyCtxt<'a, 'gcx, 'tcx>, graph_name: String, - map: &'a FnvHashMap>, + map: &'a FnvHashMap, SubregionOrigin<'tcx>>, node_ids: FnvHashMap, } @@ -135,8 +134,8 @@ enum Node { // type Edge = Constraint; #[derive(Clone, PartialEq, Eq, Debug, Copy)] -enum Edge { - Constraint(Constraint), +enum Edge<'tcx> { + Constraint(Constraint<'tcx>), EnclScope(CodeExtent, CodeExtent), } @@ -177,7 +176,7 @@ impl<'a, 'gcx, 'tcx> ConstraintGraph<'a, 'gcx, 'tcx> { impl<'a, 'gcx, 'tcx> dot::Labeller<'a> for ConstraintGraph<'a, 'gcx, 'tcx> { type Node = Node; - type Edge = Edge; + type Edge = Edge<'tcx>; fn graph_id(&self) -> dot::Id { dot::Id::new(&*self.graph_name).unwrap() } @@ -214,11 +213,11 @@ fn constraint_to_nodes(c: &Constraint) -> (Node, Node) { Constraint::ConstrainVarSubVar(rv_1, rv_2) => (Node::RegionVid(rv_1), Node::RegionVid(rv_2)), Constraint::ConstrainRegSubVar(r_1, rv_2) => - (Node::Region(r_1), Node::RegionVid(rv_2)), + (Node::Region(*r_1), Node::RegionVid(rv_2)), Constraint::ConstrainVarSubReg(rv_1, r_2) => - (Node::RegionVid(rv_1), Node::Region(r_2)), + (Node::RegionVid(rv_1), Node::Region(*r_2)), Constraint::ConstrainRegSubReg(r_1, r_2) => - (Node::Region(r_1), Node::Region(r_2)), + (Node::Region(*r_1), Node::Region(*r_2)), } } @@ -234,7 +233,7 @@ fn edge_to_nodes(e: &Edge) -> (Node, Node) { impl<'a, 'gcx, 'tcx> dot::GraphWalk<'a> for ConstraintGraph<'a, 'gcx, 'tcx> { type Node = Node; - type Edge = Edge; + type Edge = Edge<'tcx>; fn nodes(&self) -> dot::Nodes { let mut set = FnvHashSet(); for node in self.node_ids.keys() { @@ -243,26 +242,26 @@ impl<'a, 'gcx, 'tcx> dot::GraphWalk<'a> for ConstraintGraph<'a, 'gcx, 'tcx> { debug!("constraint graph has {} nodes", set.len()); set.into_iter().collect() } - fn edges(&self) -> dot::Edges { + fn edges(&self) -> dot::Edges> { debug!("constraint graph has {} edges", self.map.len()); let mut v: Vec<_> = self.map.keys().map(|e| Edge::Constraint(*e)).collect(); self.tcx.region_maps.each_encl_scope(|sub, sup| v.push(Edge::EnclScope(*sub, *sup))); debug!("region graph has {} edges", v.len()); Cow::Owned(v) } - fn source(&self, edge: &Edge) -> Node { + fn source(&self, edge: &Edge<'tcx>) -> Node { let (n1, _) = edge_to_nodes(edge); debug!("edge {:?} has source {:?}", edge, n1); n1 } - fn target(&self, edge: &Edge) -> Node { + fn target(&self, edge: &Edge<'tcx>) -> Node { let (_, n2) = edge_to_nodes(edge); debug!("edge {:?} has target {:?}", edge, n2); n2 } } -pub type ConstraintMap<'tcx> = FnvHashMap>; +pub type ConstraintMap<'tcx> = FnvHashMap, SubregionOrigin<'tcx>>; fn dump_region_constraints_to<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, map: &ConstraintMap<'tcx>, diff --git a/src/librustc/infer/region_inference/mod.rs 
b/src/librustc/infer/region_inference/mod.rs index d3b4afa2ce..ef36ffa831 100644 --- a/src/librustc/infer/region_inference/mod.rs +++ b/src/librustc/infer/region_inference/mod.rs @@ -39,22 +39,22 @@ mod graphviz; // A constraint that influences the inference process. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub enum Constraint { +pub enum Constraint<'tcx> { // One region variable is subregion of another ConstrainVarSubVar(RegionVid, RegionVid), // Concrete region is subregion of region variable - ConstrainRegSubVar(Region, RegionVid), + ConstrainRegSubVar(&'tcx Region, RegionVid), // Region variable is subregion of concrete region. This does not // directly affect inference, but instead is checked after // inference is complete. - ConstrainVarSubReg(RegionVid, Region), + ConstrainVarSubReg(RegionVid, &'tcx Region), // A constraint where neither side is a variable. This does not // directly affect inference, but instead is checked after // inference is complete. - ConstrainRegSubReg(Region, Region), + ConstrainRegSubReg(&'tcx Region, &'tcx Region), } // VerifyGenericBound(T, _, R, RS): The parameter type `T` (or @@ -66,8 +66,8 @@ pub enum Constraint { pub struct Verify<'tcx> { kind: GenericKind<'tcx>, origin: SubregionOrigin<'tcx>, - region: Region, - bound: VerifyBound, + region: &'tcx Region, + bound: VerifyBound<'tcx>, } #[derive(Copy, Clone, PartialEq, Eq)] @@ -80,36 +80,36 @@ pub enum GenericKind<'tcx> { // particular region (let's call it `'min`) meets some bound. // The bound is described the by the following grammar: #[derive(Debug)] -pub enum VerifyBound { +pub enum VerifyBound<'tcx> { // B = exists {R} --> some 'r in {R} must outlive 'min // // Put another way, the subject value is known to outlive all // regions in {R}, so if any of those outlives 'min, then the // bound is met. - AnyRegion(Vec), + AnyRegion(Vec<&'tcx Region>), // B = forall {R} --> all 'r in {R} must outlive 'min // // Put another way, the subject value is known to outlive some // region in {R}, so if all of those outlives 'min, then the bound // is met. - AllRegions(Vec), + AllRegions(Vec<&'tcx Region>), // B = exists {B} --> 'min must meet some bound b in {B} - AnyBound(Vec), + AnyBound(Vec>), // B = forall {B} --> 'min must meet all bounds b in {B} - AllBounds(Vec), + AllBounds(Vec>), } #[derive(Copy, Clone, PartialEq, Eq, Hash)] -pub struct TwoRegions { - a: Region, - b: Region, +pub struct TwoRegions<'tcx> { + a: &'tcx Region, + b: &'tcx Region, } #[derive(Copy, Clone, PartialEq)] -pub enum UndoLogEntry { +pub enum UndoLogEntry<'tcx> { /// Pushed when we start a snapshot. 
OpenSnapshot, @@ -122,7 +122,7 @@ pub enum UndoLogEntry { AddVar(RegionVid), /// We added the given `constraint` - AddConstraint(Constraint), + AddConstraint(Constraint<'tcx>), /// We added the given `verify` AddVerify(usize), @@ -131,7 +131,7 @@ pub enum UndoLogEntry { AddGiven(ty::FreeRegion, ty::RegionVid), /// We added a GLB/LUB "combinaton variable" - AddCombination(CombineMapType, TwoRegions), + AddCombination(CombineMapType, TwoRegions<'tcx>), /// During skolemization, we sometimes purge entries from the undo /// log in a kind of minisnapshot (unlike other snapshots, this @@ -153,13 +153,13 @@ pub enum RegionResolutionError<'tcx> { /// `ConcreteFailure(o, a, b)`: /// /// `o` requires that `a <= b`, but this does not hold - ConcreteFailure(SubregionOrigin<'tcx>, Region, Region), + ConcreteFailure(SubregionOrigin<'tcx>, &'tcx Region, &'tcx Region), /// `GenericBoundFailure(p, s, a) /// /// The parameter/associated-type `p` must be known to outlive the lifetime /// `a` (but none of the known bounds are sufficient). - GenericBoundFailure(SubregionOrigin<'tcx>, GenericKind<'tcx>, Region), + GenericBoundFailure(SubregionOrigin<'tcx>, GenericKind<'tcx>, &'tcx Region), /// `SubSupConflict(v, sub_origin, sub_r, sup_origin, sup_r)`: /// @@ -168,9 +168,9 @@ pub enum RegionResolutionError<'tcx> { /// `sub_r <= sup_r` does not hold. SubSupConflict(RegionVariableOrigin, SubregionOrigin<'tcx>, - Region, + &'tcx Region, SubregionOrigin<'tcx>, - Region), + &'tcx Region), /// For subsets of `ConcreteFailure` and `SubSupConflict`, we can derive /// more specific errors message by suggesting to the user where they @@ -182,7 +182,7 @@ pub enum RegionResolutionError<'tcx> { #[derive(Clone, Debug)] pub enum ProcessedErrorOrigin<'tcx> { - ConcreteFailure(SubregionOrigin<'tcx>, Region, Region), + ConcreteFailure(SubregionOrigin<'tcx>, &'tcx Region, &'tcx Region), VariableFailure(RegionVariableOrigin), } @@ -213,7 +213,7 @@ impl SameRegions { } } -pub type CombineMap = FnvHashMap; +pub type CombineMap<'tcx> = FnvHashMap, RegionVid>; pub struct RegionVarBindings<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { tcx: TyCtxt<'a, 'gcx, 'tcx>, @@ -222,7 +222,7 @@ pub struct RegionVarBindings<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { // Constraints of the form `A <= B` introduced by the region // checker. Here at least one of `A` and `B` must be a region // variable. - constraints: RefCell>>, + constraints: RefCell, SubregionOrigin<'tcx>>>, // A "verify" is something that we need to verify after inference is // done, but which does not directly affect inference in any way. @@ -250,8 +250,8 @@ pub struct RegionVarBindings<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { // a bit of a hack but seems to work. givens: RefCell>, - lubs: RefCell, - glbs: RefCell, + lubs: RefCell>, + glbs: RefCell>, skolemization_count: Cell, bound_count: Cell, @@ -264,12 +264,12 @@ pub struct RegionVarBindings<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { // otherwise we end up adding entries for things like the lower // bound on a variable and so forth, which can never be rolled // back. - undo_log: RefCell>, + undo_log: RefCell>>, unification_table: RefCell>, // This contains the results of inference. It begins as an empty // option and only acquires a value after inference is complete. 
- values: RefCell>>, + values: RefCell>>>, } pub struct RegionSnapshot { @@ -303,14 +303,14 @@ impl TaintDirections { } } -struct TaintSet { +struct TaintSet<'tcx> { directions: TaintDirections, - regions: FnvHashSet + regions: FnvHashSet<&'tcx ty::Region> } -impl TaintSet { +impl<'a, 'gcx, 'tcx> TaintSet<'tcx> { fn new(directions: TaintDirections, - initial_region: ty::Region) + initial_region: &'tcx ty::Region) -> Self { let mut regions = FnvHashSet(); regions.insert(initial_region); @@ -318,8 +318,9 @@ impl TaintSet { } fn fixed_point(&mut self, - undo_log: &[UndoLogEntry], - verifys: &[Verify]) { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + undo_log: &[UndoLogEntry<'tcx>], + verifys: &[Verify<'tcx>]) { let mut prev_len = 0; while prev_len < self.len() { debug!("tainted: prev_len = {:?} new_len = {:?}", @@ -330,19 +331,21 @@ impl TaintSet { for undo_entry in undo_log { match undo_entry { &AddConstraint(ConstrainVarSubVar(a, b)) => { - self.add_edge(ReVar(a), ReVar(b)); + self.add_edge(tcx.mk_region(ReVar(a)), + tcx.mk_region(ReVar(b))); } &AddConstraint(ConstrainRegSubVar(a, b)) => { - self.add_edge(a, ReVar(b)); + self.add_edge(a, tcx.mk_region(ReVar(b))); } &AddConstraint(ConstrainVarSubReg(a, b)) => { - self.add_edge(ReVar(a), b); + self.add_edge(tcx.mk_region(ReVar(a)), b); } &AddConstraint(ConstrainRegSubReg(a, b)) => { self.add_edge(a, b); } &AddGiven(a, b) => { - self.add_edge(ReFree(a), ReVar(b)); + self.add_edge(tcx.mk_region(ReFree(a)), + tcx.mk_region(ReVar(b))); } &AddVerify(i) => { verifys[i].bound.for_each_region(&mut |b| { @@ -359,7 +362,7 @@ impl TaintSet { } } - fn into_set(self) -> FnvHashSet { + fn into_set(self) -> FnvHashSet<&'tcx ty::Region> { self.regions } @@ -368,8 +371,8 @@ impl TaintSet { } fn add_edge(&mut self, - source: ty::Region, - target: ty::Region) { + source: &'tcx ty::Region, + target: &'tcx ty::Region) { if self.directions.incoming { if self.regions.contains(&target) { self.regions.insert(source); @@ -450,7 +453,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { .rollback_to(snapshot.region_snapshot); } - pub fn rollback_undo_entry(&self, undo_entry: UndoLogEntry) { + pub fn rollback_undo_entry(&self, undo_entry: UndoLogEntry<'tcx>) { match undo_entry { OpenSnapshot => { panic!("Failure to observe stack discipline"); @@ -529,13 +532,14 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { /// The `snapshot` argument to this function is not really used; /// it's just there to make it explicit which snapshot bounds the /// skolemized region that results. It should always be the top-most snapshot. - pub fn push_skolemized(&self, br: ty::BoundRegion, snapshot: &RegionSnapshot) -> Region { + pub fn push_skolemized(&self, br: ty::BoundRegion, snapshot: &RegionSnapshot) + -> &'tcx Region { assert!(self.in_snapshot()); assert!(self.undo_log.borrow()[snapshot.length] == OpenSnapshot); let sc = self.skolemization_count.get(); self.skolemization_count.set(sc + 1); - ReSkolemized(ty::SkolemizedRegionVid { index: sc }, br) + self.tcx.mk_region(ReSkolemized(ty::SkolemizedRegionVid { index: sc }, br)) } /// Removes all the edges to/from the skolemized regions that are @@ -543,7 +547,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { /// completes to remove all trace of the skolemized regions /// created in that time. 
pub fn pop_skolemized(&self, - skols: &FnvHashSet, + skols: &FnvHashSet<&'tcx ty::Region>, snapshot: &RegionSnapshot) { debug!("pop_skolemized_regions(skols={:?})", skols); @@ -566,7 +570,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { skols.len()); debug_assert! { skols.iter() - .all(|k| match *k { + .all(|&k| match *k { ty::ReSkolemized(index, _) => index.index >= first_to_pop && index.index < last_to_pop, @@ -597,11 +601,11 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { self.skolemization_count.set(snapshot.skolemization_count); return; - fn kill_constraint(skols: &FnvHashSet, - undo_entry: &UndoLogEntry) - -> bool { + fn kill_constraint<'tcx>(skols: &FnvHashSet<&'tcx ty::Region>, + undo_entry: &UndoLogEntry<'tcx>) + -> bool { match undo_entry { - &AddConstraint(ConstrainVarSubVar(_, _)) => + &AddConstraint(ConstrainVarSubVar(..)) => false, &AddConstraint(ConstrainRegSubVar(a, _)) => skols.contains(&a), @@ -609,7 +613,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { skols.contains(&b), &AddConstraint(ConstrainRegSubReg(a, b)) => skols.contains(&a) || skols.contains(&b), - &AddGiven(_, _) => + &AddGiven(..) => false, &AddVerify(_) => false, @@ -626,7 +630,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { } - pub fn new_bound(&self, debruijn: ty::DebruijnIndex) -> Region { + pub fn new_bound(&self, debruijn: ty::DebruijnIndex) -> &'tcx Region { // Creates a fresh bound variable for use in GLB computations. // See discussion of GLB computation in the large comment at // the top of this file for more details. @@ -652,14 +656,14 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { bug!("rollover in RegionInference new_bound()"); } - ReLateBound(debruijn, BrFresh(sc)) + self.tcx.mk_region(ReLateBound(debruijn, BrFresh(sc))) } fn values_are_none(&self) -> bool { self.values.borrow().is_none() } - fn add_constraint(&self, constraint: Constraint, origin: SubregionOrigin<'tcx>) { + fn add_constraint(&self, constraint: Constraint<'tcx>, origin: SubregionOrigin<'tcx>) { // cannot add constraints once regions are resolved assert!(self.values_are_none()); @@ -704,20 +708,26 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { } } - pub fn make_eqregion(&self, origin: SubregionOrigin<'tcx>, sub: Region, sup: Region) { + pub fn make_eqregion(&self, + origin: SubregionOrigin<'tcx>, + sub: &'tcx Region, + sup: &'tcx Region) { if sub != sup { // Eventually, it would be nice to add direct support for // equating regions. 
self.make_subregion(origin.clone(), sub, sup); self.make_subregion(origin, sup, sub); - if let (ty::ReVar(sub), ty::ReVar(sup)) = (sub, sup) { + if let (ty::ReVar(sub), ty::ReVar(sup)) = (*sub, *sup) { self.unification_table.borrow_mut().union(sub, sup); } } } - pub fn make_subregion(&self, origin: SubregionOrigin<'tcx>, sub: Region, sup: Region) { + pub fn make_subregion(&self, + origin: SubregionOrigin<'tcx>, + sub: &'tcx Region, + sup: &'tcx Region) { // cannot add constraints once regions are resolved assert!(self.values_are_none()); @@ -727,26 +737,26 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { origin); match (sub, sup) { - (ReEarlyBound(..), _) | - (ReLateBound(..), _) | - (_, ReEarlyBound(..)) | - (_, ReLateBound(..)) => { + (&ReEarlyBound(..), _) | + (&ReLateBound(..), _) | + (_, &ReEarlyBound(..)) | + (_, &ReLateBound(..)) => { span_bug!(origin.span(), "cannot relate bound region: {:?} <= {:?}", sub, sup); } - (_, ReStatic) => { + (_, &ReStatic) => { // all regions are subregions of static, so we can ignore this } - (ReVar(sub_id), ReVar(sup_id)) => { + (&ReVar(sub_id), &ReVar(sup_id)) => { self.add_constraint(ConstrainVarSubVar(sub_id, sup_id), origin); } - (r, ReVar(sup_id)) => { - self.add_constraint(ConstrainRegSubVar(r, sup_id), origin); + (_, &ReVar(sup_id)) => { + self.add_constraint(ConstrainRegSubVar(sub, sup_id), origin); } - (ReVar(sub_id), r) => { - self.add_constraint(ConstrainVarSubReg(sub_id, r), origin); + (&ReVar(sub_id), _) => { + self.add_constraint(ConstrainVarSubReg(sub_id, sup), origin); } _ => { self.add_constraint(ConstrainRegSubReg(sub, sup), origin); @@ -758,8 +768,8 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { pub fn verify_generic_bound(&self, origin: SubregionOrigin<'tcx>, kind: GenericKind<'tcx>, - sub: Region, - bound: VerifyBound) { + sub: &'tcx Region, + bound: VerifyBound<'tcx>) { self.add_verify(Verify { kind: kind, origin: origin, @@ -768,29 +778,43 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { }); } - pub fn lub_regions(&self, origin: SubregionOrigin<'tcx>, a: Region, b: Region) -> Region { + pub fn lub_regions(&self, + origin: SubregionOrigin<'tcx>, + a: &'tcx Region, + b: &'tcx Region) + -> &'tcx Region { // cannot add constraints once regions are resolved assert!(self.values_are_none()); debug!("RegionVarBindings: lub_regions({:?}, {:?})", a, b); - if a == ty::ReStatic || b == ty::ReStatic { - ReStatic // nothing lives longer than static - } else if a == b { - a // LUB(a,a) = a - } else { - self.combine_vars(Lub, a, b, origin.clone(), |this, old_r, new_r| { - this.make_subregion(origin.clone(), old_r, new_r) - }) + match (a, b) { + (r @ &ReStatic, _) | (_, r @ &ReStatic) => { + r // nothing lives longer than static + } + + _ if a == b => { + a // LUB(a,a) = a + } + + _ => { + self.combine_vars(Lub, a, b, origin.clone(), |this, old_r, new_r| { + this.make_subregion(origin.clone(), old_r, new_r) + }) + } } } - pub fn glb_regions(&self, origin: SubregionOrigin<'tcx>, a: Region, b: Region) -> Region { + pub fn glb_regions(&self, + origin: SubregionOrigin<'tcx>, + a: &'tcx Region, + b: &'tcx Region) + -> &'tcx Region { // cannot add constraints once regions are resolved assert!(self.values_are_none()); debug!("RegionVarBindings: glb_regions({:?}, {:?})", a, b); match (a, b) { - (ReStatic, r) | (r, ReStatic) => { + (&ReStatic, r) | (r, &ReStatic) => { r // static lives longer than everything else } @@ -806,7 +830,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { } } - pub fn 
resolve_var(&self, rid: RegionVid) -> ty::Region { + pub fn resolve_var(&self, rid: RegionVid) -> &'tcx ty::Region { match *self.values.borrow() { None => { span_bug!((*self.var_origins.borrow())[rid.index as usize].span(), @@ -814,18 +838,19 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { been computed!") } Some(ref values) => { - let r = lookup(values, rid); + let r = lookup(self.tcx, values, rid); debug!("resolve_var({:?}) = {:?}", rid, r); r } } } - pub fn opportunistic_resolve_var(&self, rid: RegionVid) -> ty::Region { - ty::ReVar(self.unification_table.borrow_mut().find_value(rid).min_vid) + pub fn opportunistic_resolve_var(&self, rid: RegionVid) -> &'tcx ty::Region { + let vid = self.unification_table.borrow_mut().find_value(rid).min_vid; + self.tcx.mk_region(ty::ReVar(vid)) } - fn combine_map(&self, t: CombineMapType) -> &RefCell { + fn combine_map(&self, t: CombineMapType) -> &RefCell> { match t { Glb => &self.glbs, Lub => &self.lubs, @@ -834,26 +859,26 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { pub fn combine_vars(&self, t: CombineMapType, - a: Region, - b: Region, + a: &'tcx Region, + b: &'tcx Region, origin: SubregionOrigin<'tcx>, mut relate: F) - -> Region - where F: FnMut(&RegionVarBindings<'a, 'gcx, 'tcx>, Region, Region) + -> &'tcx Region + where F: FnMut(&RegionVarBindings<'a, 'gcx, 'tcx>, &'tcx Region, &'tcx Region) { let vars = TwoRegions { a: a, b: b }; if let Some(&c) = self.combine_map(t).borrow().get(&vars) { - return ReVar(c); + return self.tcx.mk_region(ReVar(c)); } let c = self.new_region_var(MiscVariable(origin.span())); self.combine_map(t).borrow_mut().insert(vars, c); if self.in_snapshot() { self.undo_log.borrow_mut().push(AddCombination(t, vars)); } - relate(self, a, ReVar(c)); - relate(self, b, ReVar(c)); + relate(self, a, self.tcx.mk_region(ReVar(c))); + relate(self, b, self.tcx.mk_region(ReVar(c))); debug!("combine_vars() c={:?}", c); - ReVar(c) + self.tcx.mk_region(ReVar(c)) } pub fn vars_created_since_snapshot(&self, mark: &RegionSnapshot) -> Vec { @@ -878,9 +903,9 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { /// related to other regions. pub fn tainted(&self, mark: &RegionSnapshot, - r0: Region, + r0: &'tcx Region, directions: TaintDirections) - -> FnvHashSet { + -> FnvHashSet<&'tcx ty::Region> { debug!("tainted(mark={:?}, r0={:?}, directions={:?})", mark, r0, directions); @@ -888,7 +913,8 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { // edges and add any new regions we find to result_set. This // is not a terribly efficient implementation. 
let mut taint_set = TaintSet::new(directions, r0); - taint_set.fixed_point(&self.undo_log.borrow()[mark.length..], + taint_set.fixed_point(self.tcx, + &self.undo_log.borrow()[mark.length..], &self.verifys.borrow()); debug!("tainted: result={:?}", taint_set.regions); return taint_set.into_set(); @@ -910,26 +936,30 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { errors } - fn lub_concrete_regions(&self, free_regions: &FreeRegionMap, a: Region, b: Region) -> Region { + fn lub_concrete_regions(&self, + free_regions: &FreeRegionMap, + a: &'tcx Region, + b: &'tcx Region) + -> &'tcx Region { match (a, b) { - (ReLateBound(..), _) | - (_, ReLateBound(..)) | - (ReEarlyBound(..), _) | - (_, ReEarlyBound(..)) | - (ReErased, _) | - (_, ReErased) => { + (&ReLateBound(..), _) | + (_, &ReLateBound(..)) | + (&ReEarlyBound(..), _) | + (_, &ReEarlyBound(..)) | + (&ReErased, _) | + (_, &ReErased) => { bug!("cannot relate region: LUB({:?}, {:?})", a, b); } - (ReStatic, _) | (_, ReStatic) => { - ReStatic // nothing lives longer than static + (r @ &ReStatic, _) | (_, r @ &ReStatic) => { + r // nothing lives longer than static } - (ReEmpty, r) | (r, ReEmpty) => { + (&ReEmpty, r) | (r, &ReEmpty) => { r // everything lives longer than empty } - (ReVar(v_id), _) | (_, ReVar(v_id)) => { + (&ReVar(v_id), _) | (_, &ReVar(v_id)) => { span_bug!((*self.var_origins.borrow())[v_id.index as usize].span(), "lub_concrete_regions invoked with non-concrete \ regions: {:?}, {:?}", @@ -937,9 +967,8 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { b); } - (ReFree(ref fr), ReScope(s_id)) | - (ReScope(s_id), ReFree(ref fr)) => { - let f = ReFree(*fr); + (&ReFree(fr), &ReScope(s_id)) | + (&ReScope(s_id), &ReFree(fr)) => { // A "free" region can be interpreted as "some region // at least as big as the block fr.scope_id". So, we can // reasonably compare free regions and scopes: @@ -949,33 +978,34 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { // if the free region's scope `fr.scope_id` is bigger than // the scope region `s_id`, then the LUB is the free // region itself: - f + self.tcx.mk_region(ReFree(fr)) } else { // otherwise, we don't know what the free region is, // so we must conservatively say the LUB is static: - ReStatic + self.tcx.mk_region(ReStatic) } } - (ReScope(a_id), ReScope(b_id)) => { + (&ReScope(a_id), &ReScope(b_id)) => { // The region corresponding to an outer block is a // subtype of the region corresponding to an inner // block. 
- ReScope(self.tcx.region_maps.nearest_common_ancestor(a_id, b_id)) + self.tcx.mk_region(ReScope( + self.tcx.region_maps.nearest_common_ancestor(a_id, b_id))) } - (ReFree(a_fr), ReFree(b_fr)) => { - free_regions.lub_free_regions(a_fr, b_fr) + (&ReFree(a_fr), &ReFree(b_fr)) => { + self.tcx.mk_region(free_regions.lub_free_regions(a_fr, b_fr)) } // For these types, we cannot define any additional // relationship: - (ReSkolemized(..), _) | - (_, ReSkolemized(..)) => { + (&ReSkolemized(..), _) | + (_, &ReSkolemized(..)) => { if a == b { a } else { - ReStatic + self.tcx.mk_region(ReStatic) } } } @@ -985,24 +1015,24 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { // ______________________________________________________________________ #[derive(Copy, Clone, Debug)] -pub enum VarValue { - Value(Region), +pub enum VarValue<'tcx> { + Value(&'tcx Region), ErrorValue, } struct RegionAndOrigin<'tcx> { - region: Region, + region: &'tcx Region, origin: SubregionOrigin<'tcx>, } -type RegionGraph = graph::Graph<(), Constraint>; +type RegionGraph<'tcx> = graph::Graph<(), Constraint<'tcx>>; impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { fn infer_variable_values(&self, free_regions: &FreeRegionMap, errors: &mut Vec>, subject: ast::NodeId) - -> Vec { + -> Vec> { let mut var_data = self.construct_var_data(); // Dorky hack to cause `dump_constraints` to only get called @@ -1020,9 +1050,9 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { var_data } - fn construct_var_data(&self) -> Vec { + fn construct_var_data(&self) -> Vec> { (0..self.num_vars() as usize) - .map(|_| Value(ty::ReEmpty)) + .map(|_| Value(self.tcx.mk_region(ty::ReEmpty))) .collect() } @@ -1059,7 +1089,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { } } - fn expansion(&self, free_regions: &FreeRegionMap, var_values: &mut [VarValue]) { + fn expansion(&self, free_regions: &FreeRegionMap, var_values: &mut [VarValue<'tcx>]) { self.iterate_until_fixed_point("Expansion", |constraint, origin| { debug!("expansion: constraint={:?} origin={:?}", constraint, origin); @@ -1089,9 +1119,9 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { fn expand_node(&self, free_regions: &FreeRegionMap, - a_region: Region, + a_region: &'tcx Region, b_vid: RegionVid, - b_data: &mut VarValue) + b_data: &mut VarValue<'tcx>) -> bool { debug!("expand_node({:?}, {:?} == {:?})", a_region, @@ -1099,7 +1129,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { b_data); // Check if this relationship is implied by a given. - match a_region { + match *a_region { ty::ReFree(fr) => { if self.givens.borrow().contains(&(fr, b_vid)) { debug!("given"); @@ -1136,7 +1166,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { /// and check that they are satisfied. fn collect_errors(&self, free_regions: &FreeRegionMap, - var_data: &mut Vec, + var_data: &mut Vec>, errors: &mut Vec>) { let constraints = self.constraints.borrow(); for (constraint, origin) in constraints.iter() { @@ -1192,7 +1222,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { for verify in self.verifys.borrow().iter() { debug!("collect_errors: verify={:?}", verify); - let sub = normalize(var_data, verify.region); + let sub = normalize(self.tcx, var_data, verify.region); if verify.bound.is_met(self.tcx, free_regions, var_data, sub) { continue; } @@ -1213,8 +1243,8 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { /// and create a `RegionResolutionError` for each of them. 
fn collect_var_errors(&self, free_regions: &FreeRegionMap, - var_data: &[VarValue], - graph: &RegionGraph, + var_data: &[VarValue<'tcx>], + graph: &RegionGraph<'tcx>, errors: &mut Vec>) { debug!("collect_var_errors"); @@ -1271,7 +1301,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { } } - fn construct_graph(&self) -> RegionGraph { + fn construct_graph(&self) -> RegionGraph<'tcx> { let num_vars = self.num_vars(); let constraints = self.constraints.borrow(); @@ -1315,7 +1345,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { fn collect_error_for_expanding_node(&self, free_regions: &FreeRegionMap, - graph: &RegionGraph, + graph: &RegionGraph<'tcx>, dup_vec: &mut [u32], node_idx: RegionVid, errors: &mut Vec>) { @@ -1339,10 +1369,10 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { // the user will more likely get a specific suggestion. fn free_regions_first(a: &RegionAndOrigin, b: &RegionAndOrigin) -> Ordering { match (a.region, b.region) { - (ReFree(..), ReFree(..)) => Equal, - (ReFree(..), _) => Less, - (_, ReFree(..)) => Greater, - (_, _) => Equal, + (&ReFree(..), &ReFree(..)) => Equal, + (&ReFree(..), _) => Less, + (_, &ReFree(..)) => Greater, + (..) => Equal, } } lower_bounds.sort_by(|a, b| free_regions_first(a, b)); @@ -1378,7 +1408,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { } fn collect_concrete_regions(&self, - graph: &RegionGraph, + graph: &RegionGraph<'tcx>, orig_node_idx: RegionVid, dir: Direction, dup_vec: &mut [u32]) @@ -1423,7 +1453,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { fn process_edges<'a, 'gcx, 'tcx>(this: &RegionVarBindings<'a, 'gcx, 'tcx>, state: &mut WalkState<'tcx>, - graph: &RegionGraph, + graph: &RegionGraph<'tcx>, source_vid: RegionVid, dir: Direction) { debug!("process_edges(source_vid={:?}, dir={:?})", source_vid, dir); @@ -1460,7 +1490,7 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { } fn iterate_until_fixed_point(&self, tag: &str, mut body: F) - where F: FnMut(&Constraint, &SubregionOrigin<'tcx>) -> bool + where F: FnMut(&Constraint<'tcx>, &SubregionOrigin<'tcx>) -> bool { let mut iteration = 0; let mut changed = true; @@ -1481,17 +1511,23 @@ impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { } -fn normalize(values: &Vec, r: ty::Region) -> ty::Region { - match r { - ty::ReVar(rid) => lookup(values, rid), +fn normalize<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + values: &Vec>, + r: &'tcx ty::Region) + -> &'tcx ty::Region { + match *r { + ty::ReVar(rid) => lookup(tcx, values, rid), _ => r, } } -fn lookup(values: &Vec, rid: ty::RegionVid) -> ty::Region { +fn lookup<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + values: &Vec>, + rid: ty::RegionVid) + -> &'tcx ty::Region { match values[rid.index as usize] { Value(r) => r, - ErrorValue => ReStatic, // Previously reported error. + ErrorValue => tcx.mk_region(ReStatic), // Previously reported error. 
} } @@ -1535,8 +1571,8 @@ impl<'a, 'gcx, 'tcx> GenericKind<'tcx> { } } -impl<'a, 'gcx, 'tcx> VerifyBound { - fn for_each_region(&self, f: &mut FnMut(ty::Region)) { +impl<'a, 'gcx, 'tcx> VerifyBound<'tcx> { + fn for_each_region(&self, f: &mut FnMut(&'tcx ty::Region)) { match self { &VerifyBound::AnyRegion(ref rs) | &VerifyBound::AllRegions(ref rs) => for &r in rs { @@ -1552,7 +1588,7 @@ impl<'a, 'gcx, 'tcx> VerifyBound { pub fn must_hold(&self) -> bool { match self { - &VerifyBound::AnyRegion(ref bs) => bs.contains(&ty::ReStatic), + &VerifyBound::AnyRegion(ref bs) => bs.contains(&&ty::ReStatic), &VerifyBound::AllRegions(ref bs) => bs.is_empty(), &VerifyBound::AnyBound(ref bs) => bs.iter().any(|b| b.must_hold()), &VerifyBound::AllBounds(ref bs) => bs.iter().all(|b| b.must_hold()), @@ -1562,13 +1598,13 @@ impl<'a, 'gcx, 'tcx> VerifyBound { pub fn cannot_hold(&self) -> bool { match self { &VerifyBound::AnyRegion(ref bs) => bs.is_empty(), - &VerifyBound::AllRegions(ref bs) => bs.contains(&ty::ReEmpty), + &VerifyBound::AllRegions(ref bs) => bs.contains(&&ty::ReEmpty), &VerifyBound::AnyBound(ref bs) => bs.iter().all(|b| b.cannot_hold()), &VerifyBound::AllBounds(ref bs) => bs.iter().any(|b| b.cannot_hold()), } } - pub fn or(self, vb: VerifyBound) -> VerifyBound { + pub fn or(self, vb: VerifyBound<'tcx>) -> VerifyBound<'tcx> { if self.must_hold() || vb.cannot_hold() { self } else if self.cannot_hold() || vb.must_hold() { @@ -1578,7 +1614,7 @@ impl<'a, 'gcx, 'tcx> VerifyBound { } } - pub fn and(self, vb: VerifyBound) -> VerifyBound { + pub fn and(self, vb: VerifyBound<'tcx>) -> VerifyBound<'tcx> { if self.must_hold() && vb.must_hold() { self } else if self.cannot_hold() && vb.cannot_hold() { @@ -1590,18 +1626,18 @@ impl<'a, 'gcx, 'tcx> VerifyBound { fn is_met(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, free_regions: &FreeRegionMap, - var_values: &Vec, - min: ty::Region) + var_values: &Vec>, + min: &'tcx ty::Region) -> bool { match self { &VerifyBound::AnyRegion(ref rs) => rs.iter() - .map(|&r| normalize(var_values, r)) + .map(|&r| normalize(tcx, var_values, r)) .any(|r| free_regions.is_subregion_of(tcx, min, r)), &VerifyBound::AllRegions(ref rs) => rs.iter() - .map(|&r| normalize(var_values, r)) + .map(|&r| normalize(tcx, var_values, r)) .all(|r| free_regions.is_subregion_of(tcx, min, r)), &VerifyBound::AnyBound(ref bs) => diff --git a/src/librustc/infer/resolve.rs b/src/librustc/infer/resolve.rs index 5f550b427e..357a03a2ff 100644 --- a/src/librustc/infer/resolve.rs +++ b/src/librustc/infer/resolve.rs @@ -72,10 +72,10 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for OpportunisticTypeAndRegionResolv } } - fn fold_region(&mut self, r: ty::Region) -> ty::Region { - match r { - ty::ReVar(rid) => self.infcx.region_vars.opportunistic_resolve_var(rid), - _ => r, + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { + match *r { + ty::ReVar(rid) => self.infcx.region_vars.opportunistic_resolve_var(rid), + _ => r, } } } @@ -138,10 +138,10 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for FullTypeResolver<'a, 'gcx, 'tcx> } } - fn fold_region(&mut self, r: ty::Region) -> ty::Region { - match r { - ty::ReVar(rid) => self.infcx.region_vars.resolve_var(rid), - _ => r, + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { + match *r { + ty::ReVar(rid) => self.infcx.region_vars.resolve_var(rid), + _ => r, } } } diff --git a/src/librustc/infer/sub.rs b/src/librustc/infer/sub.rs index 2f7f525472..159de2face 100644 --- a/src/librustc/infer/sub.rs +++ b/src/librustc/infer/sub.rs @@ -107,7 
+107,8 @@ impl<'combine, 'infcx, 'gcx, 'tcx> TypeRelation<'infcx, 'gcx, 'tcx> } } - fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> { + fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region> { debug!("{}.regions({:?}, {:?}) self.cause={:?}", self.tag(), a, b, self.fields.cause); // FIXME -- we have more fine-grained information available diff --git a/src/librustc/infer/type_variable.rs b/src/librustc/infer/type_variable.rs index 09ae16540c..da9fd1cff2 100644 --- a/src/librustc/infer/type_variable.rs +++ b/src/librustc/infer/type_variable.rs @@ -267,7 +267,7 @@ impl<'tcx> TypeVariableTable<'tcx> { debug!("NewElem({}) new_elem_threshold={}", index, new_elem_threshold); } - sv::UndoLog::Other(SpecifyVar(vid, _, _)) => { + sv::UndoLog::Other(SpecifyVar(vid, ..)) => { if vid.index < new_elem_threshold { // quick check to see if this variable was // created since the snapshot started or not. diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index 48ea953cc1..25731df477 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -24,11 +24,14 @@ #![cfg_attr(not(stage0), deny(warnings))] #![feature(associated_consts)] +#![feature(borrow_state)] #![feature(box_patterns)] #![feature(box_syntax)] #![feature(collections)] +#![feature(conservative_impl_trait)] #![feature(const_fn)] #![feature(core_intrinsics)] +#![feature(dotdot_in_tuple_patterns)] #![feature(enumset)] #![feature(libc)] #![feature(nonzero)] @@ -37,7 +40,7 @@ #![feature(rustc_private)] #![feature(slice_patterns)] #![feature(staged_api)] -#![feature(question_mark)] +#![cfg_attr(stage0, feature(question_mark))] #![cfg_attr(test, feature(test))] extern crate arena; @@ -47,7 +50,6 @@ extern crate fmt_macros; extern crate getopts; extern crate graphviz; extern crate libc; -extern crate rbml; extern crate rustc_llvm as llvm; extern crate rustc_back; extern crate rustc_data_structures; diff --git a/src/librustc/lint/builtin.rs b/src/librustc/lint/builtin.rs index 3230a08c27..d378772e65 100644 --- a/src/librustc/lint/builtin.rs +++ b/src/librustc/lint/builtin.rs @@ -157,7 +157,7 @@ declare_lint! { declare_lint! { pub TRANSMUTE_FROM_FN_ITEM_TYPES, - Warn, + Deny, "transmute from function item type to pointer-sized type erroneously allowed" } @@ -187,21 +187,15 @@ declare_lint! { } declare_lint! { - pub UNSIZED_IN_TUPLE, - Warn, - "unsized types in the interior of a tuple were erroneously allowed" -} - -declare_lint! { - pub OBJECT_UNSAFE_FRAGMENT, + pub LIFETIME_UNDERSCORE, Warn, - "object-unsafe non-principal fragments in object types were erroneously allowed" + "lifetimes or labels named `'_` were erroneously allowed" } declare_lint! 
{ - pub LIFETIME_UNDERSCORE, + pub SAFE_EXTERN_STATICS, Warn, - "lifetimes or labels named `'_` were erroneously allowed" + "safe access to extern statics was erroneously allowed" } /// Does nothing as a lint pass, but registers some `Lint`s @@ -239,10 +233,9 @@ impl LintPass for HardwiredLints { OVERLAPPING_INHERENT_IMPLS, RENAMED_AND_REMOVED_LINTS, SUPER_OR_SELF_IN_GLOBAL_PATH, - UNSIZED_IN_TUPLE, - OBJECT_UNSAFE_FRAGMENT, HR_LIFETIME_IN_ASSOC_TYPE, - LIFETIME_UNDERSCORE + LIFETIME_UNDERSCORE, + SAFE_EXTERN_STATICS ) } } diff --git a/src/librustc/lint/context.rs b/src/librustc/lint/context.rs index daac315e14..81d3d440b5 100644 --- a/src/librustc/lint/context.rs +++ b/src/librustc/lint/context.rs @@ -38,7 +38,7 @@ use util::nodemap::FnvHashMap; use std::cmp; use std::default::Default as StdDefault; use std::mem; -use syntax::attr::{self, AttrMetaMethods}; +use syntax::attr; use syntax::parse::token::InternedString; use syntax::ast; use syntax_pos::Span; @@ -372,12 +372,10 @@ pub fn gather_attr(attr: &ast::Attribute) return out; }; - for meta in metas { - out.push(if meta.is_word() { - Ok((meta.name().clone(), level, meta.span)) - } else { - Err(meta.span) - }); + for li in metas { + out.push(li.word().map_or(Err(li.span), |word| { + Ok((word.name().clone(), level, word.span)) + })); } out @@ -601,16 +599,17 @@ pub trait LintContext: Sized { for (lint_id, level, span) in v { let (now, now_source) = self.lints().get_level_source(lint_id); if now == Forbid && level != Forbid { - let lint_name = lint_id.as_str(); + let lint_name = lint_id.to_string(); let mut diag_builder = struct_span_err!(self.sess(), span, E0453, "{}({}) overruled by outer forbid({})", level.as_str(), lint_name, lint_name); + diag_builder.span_label(span, &format!("overruled by previous forbid")); match now_source { LintSource::Default => &mut diag_builder, LintSource::Node(forbid_source_span) => { - diag_builder.span_note(forbid_source_span, - "`forbid` lint level set here") + diag_builder.span_label(forbid_source_span, + &format!("`forbid` level set here")) }, LintSource::CommandLine => { diag_builder.note("`forbid` lint level was set on command line") @@ -1216,7 +1215,7 @@ pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, for &(lint, span, ref msg) in v { span_bug!(span, "unprocessed lint {} at {}: {}", - lint.as_str(), tcx.map.node_to_string(*id), *msg) + lint.to_string(), tcx.map.node_to_string(*id), *msg) } } @@ -1252,7 +1251,7 @@ pub fn check_ast_crate(sess: &Session, krate: &ast::Crate) { // in the iteration code. for (_, v) in sess.lints.borrow().iter() { for &(lint, span, ref msg) in v { - span_bug!(span, "unprocessed lint {}: {}", lint.as_str(), *msg) + span_bug!(span, "unprocessed lint {}: {}", lint.to_string(), *msg) } } } diff --git a/src/librustc/lint/mod.rs b/src/librustc/lint/mod.rs index f34b14224f..0938086b00 100644 --- a/src/librustc/lint/mod.rs +++ b/src/librustc/lint/mod.rs @@ -263,7 +263,7 @@ impl LintId { } /// Get the name of the lint. - pub fn as_str(&self) -> String { + pub fn to_string(&self) -> String { self.lint.name_lower() } } diff --git a/src/librustc/middle/cstore.rs b/src/librustc/middle/cstore.rs index dec6f36084..107cf9b6ca 100644 --- a/src/librustc/middle/cstore.rs +++ b/src/librustc/middle/cstore.rs @@ -23,22 +23,23 @@ // probably get a better home if someone can find one. 
use hir::def::{self, Def}; -use hir::def_id::{DefId, DefIndex}; +use hir::def_id::{CrateNum, DefId, DefIndex}; use hir::map as hir_map; -use hir::map::definitions::DefKey; +use hir::map::definitions::{Definitions, DefKey}; use hir::svh::Svh; use middle::lang_items; -use ty::{self, Ty, TyCtxt, VariantKind}; +use ty::{self, Ty, TyCtxt}; use mir::repr::Mir; use mir::mir_map::MirMap; use session::Session; use session::config::PanicStrategy; use session::search_paths::PathKind; -use util::nodemap::{FnvHashMap, NodeSet, DefIdMap}; -use std::rc::Rc; +use util::nodemap::{NodeSet, DefIdMap}; use std::path::PathBuf; +use std::rc::Rc; use syntax::ast; use syntax::attr; +use syntax::ext::base::MultiItemModifier; use syntax::ptr::P; use syntax::parse::token::InternedString; use syntax_pos::Span; @@ -46,7 +47,6 @@ use rustc_back::target::Target; use hir; use hir::intravisit::Visitor; -pub use self::DefLike::{DlDef, DlField, DlImpl}; pub use self::NativeLibraryKind::{NativeStatic, NativeFramework, NativeUnknown}; // lonely orphan structs and enums looking for a better home @@ -63,30 +63,20 @@ pub struct LinkMeta { pub struct CrateSource { pub dylib: Option<(PathBuf, PathKind)>, pub rlib: Option<(PathBuf, PathKind)>, - pub cnum: ast::CrateNum, + pub cnum: CrateNum, } -#[derive(Copy, Debug, PartialEq, Clone)] +#[derive(Copy, Debug, PartialEq, Clone, RustcEncodable, RustcDecodable)] pub enum LinkagePreference { RequireDynamic, RequireStatic, } -enum_from_u32! { - #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] - pub enum NativeLibraryKind { - NativeStatic, // native static library (.a archive) - NativeFramework, // OSX-specific - NativeUnknown, // default way to specify a dynamic library - } -} - -// Something that a name can resolve to. -#[derive(Copy, Clone, Debug)] -pub enum DefLike { - DlDef(Def), - DlImpl(DefId), - DlField +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +pub enum NativeLibraryKind { + NativeStatic, // native static library (.a archive) + NativeFramework, // OSX-specific + NativeUnknown, // default way to specify a dynamic library } /// The data we save and restore about an inlined item or method. This is not @@ -96,28 +86,15 @@ pub enum DefLike { pub enum InlinedItem { Item(DefId /* def-id in source crate */, P), TraitItem(DefId /* impl id */, P), - ImplItem(DefId /* impl id */, P), - Foreign(DefId /* extern item */, P), + ImplItem(DefId /* impl id */, P) } /// A borrowed version of `hir::InlinedItem`. -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, Hash, Debug)] pub enum InlinedItemRef<'a> { Item(DefId, &'a hir::Item), TraitItem(DefId, &'a hir::TraitItem), - ImplItem(DefId, &'a hir::ImplItem), - Foreign(DefId, &'a hir::ForeignItem) -} - -/// Item definitions in the currently-compiled crate would have the CrateNum -/// LOCAL_CRATE in their DefId. -pub const LOCAL_CRATE: ast::CrateNum = 0; - -#[derive(Copy, Clone)] -pub struct ChildItem { - pub def: DefLike, - pub name: ast::Name, - pub vis: ty::Visibility, + ImplItem(DefId, &'a hir::ImplItem) } #[derive(Copy, Clone, Debug)] @@ -145,102 +122,89 @@ pub struct ExternCrate { /// can be accessed. 
pub trait CrateStore<'tcx> { // item info + fn describe_def(&self, def: DefId) -> Option; fn stability(&self, def: DefId) -> Option; fn deprecation(&self, def: DefId) -> Option; fn visibility(&self, def: DefId) -> ty::Visibility; fn closure_kind(&self, def_id: DefId) -> ty::ClosureKind; fn closure_ty<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> ty::ClosureTy<'tcx>; - fn item_variances(&self, def: DefId) -> ty::ItemVariances; - fn repr_attrs(&self, def: DefId) -> Vec; + fn item_variances(&self, def: DefId) -> Vec; fn item_type<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) - -> ty::TypeScheme<'tcx>; + -> Ty<'tcx>; fn visible_parent_map<'a>(&'a self) -> ::std::cell::RefMut<'a, DefIdMap>; - fn item_name(&self, def: DefId) -> ast::Name; - fn opt_item_name(&self, def: DefId) -> Option; fn item_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::GenericPredicates<'tcx>; fn item_super_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::GenericPredicates<'tcx>; + fn item_generics<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> ty::Generics<'tcx>; fn item_attrs(&self, def_id: DefId) -> Vec; fn trait_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)-> ty::TraitDef<'tcx>; fn adt_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx>; - fn method_arg_names(&self, did: DefId) -> Vec; + fn fn_arg_names(&self, did: DefId) -> Vec; fn inherent_implementations_for_type(&self, def_id: DefId) -> Vec; // trait info - fn implementations_of_trait(&self, def_id: DefId) -> Vec; - fn provided_trait_methods<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) - -> Vec>>; - fn trait_item_def_ids(&self, def: DefId) - -> Vec; + fn implementations_of_trait(&self, filter: Option) -> Vec; // impl info - fn impl_items(&self, impl_def_id: DefId) -> Vec; + fn impl_or_trait_items(&self, def_id: DefId) -> Vec; fn impl_trait_ref<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> Option>; - fn impl_polarity(&self, def: DefId) -> Option; + fn impl_polarity(&self, def: DefId) -> hir::ImplPolarity; fn custom_coerce_unsized_kind(&self, def: DefId) -> Option; - fn associated_consts<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) - -> Vec>>; fn impl_parent(&self, impl_def_id: DefId) -> Option; // trait/impl-item info - fn trait_of_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) - -> Option; + fn trait_of_item(&self, def_id: DefId) -> Option; fn impl_or_trait_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> Option>; // flags fn is_const_fn(&self, did: DefId) -> bool; fn is_defaulted_trait(&self, did: DefId) -> bool; - fn is_impl(&self, did: DefId) -> bool; fn is_default_impl(&self, impl_did: DefId) -> bool; fn is_extern_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, did: DefId) -> bool; fn is_foreign_item(&self, did: DefId) -> bool; - fn is_static_method(&self, did: DefId) -> bool; fn is_statically_included_foreign_item(&self, id: ast::NodeId) -> bool; - fn is_typedef(&self, did: DefId) -> bool; // crate metadata - fn dylib_dependency_formats(&self, cnum: ast::CrateNum) - -> Vec<(ast::CrateNum, LinkagePreference)>; - fn lang_items(&self, cnum: ast::CrateNum) -> Vec<(DefIndex, usize)>; - fn missing_lang_items(&self, cnum: ast::CrateNum) -> Vec; - fn is_staged_api(&self, cnum: ast::CrateNum) -> bool; - fn is_explicitly_linked(&self, cnum: ast::CrateNum) -> bool; - fn is_allocator(&self, cnum: ast::CrateNum) -> bool; - fn is_panic_runtime(&self, cnum: ast::CrateNum) -> bool; - fn panic_strategy(&self, cnum: 
ast::CrateNum) -> PanicStrategy; - fn extern_crate(&self, cnum: ast::CrateNum) -> Option; - fn crate_attrs(&self, cnum: ast::CrateNum) -> Vec; + fn dylib_dependency_formats(&self, cnum: CrateNum) + -> Vec<(CrateNum, LinkagePreference)>; + fn lang_items(&self, cnum: CrateNum) -> Vec<(DefIndex, usize)>; + fn missing_lang_items(&self, cnum: CrateNum) -> Vec; + fn is_staged_api(&self, cnum: CrateNum) -> bool; + fn is_explicitly_linked(&self, cnum: CrateNum) -> bool; + fn is_allocator(&self, cnum: CrateNum) -> bool; + fn is_panic_runtime(&self, cnum: CrateNum) -> bool; + fn is_compiler_builtins(&self, cnum: CrateNum) -> bool; + fn panic_strategy(&self, cnum: CrateNum) -> PanicStrategy; + fn extern_crate(&self, cnum: CrateNum) -> Option; /// The name of the crate as it is referred to in source code of the current /// crate. - fn crate_name(&self, cnum: ast::CrateNum) -> InternedString; + fn crate_name(&self, cnum: CrateNum) -> InternedString; /// The name of the crate as it is stored in the crate's metadata. - fn original_crate_name(&self, cnum: ast::CrateNum) -> InternedString; - fn crate_hash(&self, cnum: ast::CrateNum) -> Svh; - fn crate_disambiguator(&self, cnum: ast::CrateNum) -> InternedString; - fn crate_struct_field_attrs(&self, cnum: ast::CrateNum) - -> FnvHashMap>; - fn plugin_registrar_fn(&self, cnum: ast::CrateNum) -> Option; - fn native_libraries(&self, cnum: ast::CrateNum) -> Vec<(NativeLibraryKind, String)>; - fn reachable_ids(&self, cnum: ast::CrateNum) -> Vec; + fn original_crate_name(&self, cnum: CrateNum) -> InternedString; + fn crate_hash(&self, cnum: CrateNum) -> Svh; + fn crate_disambiguator(&self, cnum: CrateNum) -> InternedString; + fn plugin_registrar_fn(&self, cnum: CrateNum) -> Option; + fn native_libraries(&self, cnum: CrateNum) -> Vec<(NativeLibraryKind, String)>; + fn reachable_ids(&self, cnum: CrateNum) -> Vec; + fn is_no_builtins(&self, cnum: CrateNum) -> bool; // resolve fn def_index_for_def_key(&self, - cnum: ast::CrateNum, + cnum: CrateNum, def: DefKey) -> Option; fn def_key(&self, def: DefId) -> hir_map::DefKey; - fn relative_def_path(&self, def: DefId) -> hir_map::DefPath; - fn variant_kind(&self, def_id: DefId) -> Option; + fn relative_def_path(&self, def: DefId) -> Option; + fn variant_kind(&self, def_id: DefId) -> Option; fn struct_ctor_def_id(&self, struct_def_id: DefId) -> Option; - fn tuple_struct_definition_if_ctor(&self, did: DefId) -> Option; fn struct_field_names(&self, def: DefId) -> Vec; - fn item_children(&self, did: DefId) -> Vec; - fn crate_top_level_items(&self, cnum: ast::CrateNum) -> Vec; + fn item_children(&self, did: DefId) -> Vec; // misc. metadata fn maybe_get_item_ast<'a>(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) @@ -254,27 +218,21 @@ pub trait CrateStore<'tcx> { // This is basically a 1-based range of ints, which is a little // silly - I may fix that. 
- fn crates(&self) -> Vec; + fn crates(&self) -> Vec; fn used_libraries(&self) -> Vec<(String, NativeLibraryKind)>; fn used_link_args(&self) -> Vec; // utility functions fn metadata_filename(&self) -> &str; fn metadata_section_name(&self, target: &Target) -> &str; - fn encode_type<'a>(&self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>, - def_id_to_string: for<'b> fn(TyCtxt<'b, 'tcx, 'tcx>, DefId) -> String) - -> Vec; - fn used_crates(&self, prefer: LinkagePreference) -> Vec<(ast::CrateNum, Option)>; - fn used_crate_source(&self, cnum: ast::CrateNum) -> CrateSource; - fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option; + fn used_crates(&self, prefer: LinkagePreference) -> Vec<(CrateNum, Option)>; + fn used_crate_source(&self, cnum: CrateNum) -> CrateSource; + fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option; fn encode_metadata<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, reexports: &def::ExportMap, link_meta: &LinkMeta, reachable: &NodeSet, - mir_map: &MirMap<'tcx>, - krate: &hir::Crate) -> Vec; + mir_map: &MirMap<'tcx>) -> Vec; fn metadata_encoding_version(&self) -> &[u8]; } @@ -284,7 +242,6 @@ impl InlinedItem { { match *self { InlinedItem::Item(_, ref i) => visitor.visit_item(&i), - InlinedItem::Foreign(_, ref i) => visitor.visit_foreign_item(&i), InlinedItem::TraitItem(_, ref ti) => visitor.visit_trait_item(ti), InlinedItem::ImplItem(_, ref ii) => visitor.visit_impl_item(ii), } @@ -324,123 +281,108 @@ pub struct DummyCrateStore; #[allow(unused_variables)] impl<'tcx> CrateStore<'tcx> for DummyCrateStore { // item info + fn describe_def(&self, def: DefId) -> Option { bug!("describe_def") } fn stability(&self, def: DefId) -> Option { bug!("stability") } fn deprecation(&self, def: DefId) -> Option { bug!("deprecation") } fn visibility(&self, def: DefId) -> ty::Visibility { bug!("visibility") } - fn closure_kind(&self, def_id: DefId) -> ty::ClosureKind { bug!("closure_kind") } + fn closure_kind(&self, def_id: DefId) -> ty::ClosureKind { bug!("closure_kind") } fn closure_ty<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> ty::ClosureTy<'tcx> { bug!("closure_ty") } - fn item_variances(&self, def: DefId) -> ty::ItemVariances { bug!("item_variances") } - fn repr_attrs(&self, def: DefId) -> Vec { bug!("repr_attrs") } + fn item_variances(&self, def: DefId) -> Vec { bug!("item_variances") } fn item_type<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) - -> ty::TypeScheme<'tcx> { bug!("item_type") } + -> Ty<'tcx> { bug!("item_type") } fn visible_parent_map<'a>(&'a self) -> ::std::cell::RefMut<'a, DefIdMap> { bug!("visible_parent_map") } - fn item_name(&self, def: DefId) -> ast::Name { bug!("item_name") } - fn opt_item_name(&self, def: DefId) -> Option { bug!("opt_item_name") } fn item_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::GenericPredicates<'tcx> { bug!("item_predicates") } fn item_super_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::GenericPredicates<'tcx> { bug!("item_super_predicates") } + fn item_generics<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> ty::Generics<'tcx> { bug!("item_generics") } fn item_attrs(&self, def_id: DefId) -> Vec { bug!("item_attrs") } fn trait_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)-> ty::TraitDef<'tcx> { bug!("trait_def") } fn adt_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx> { bug!("adt_def") } - fn method_arg_names(&self, did: DefId) -> Vec { bug!("method_arg_names") } + fn fn_arg_names(&self, did: DefId) -> Vec { 
bug!("fn_arg_names") } fn inherent_implementations_for_type(&self, def_id: DefId) -> Vec { vec![] } // trait info - fn implementations_of_trait(&self, def_id: DefId) -> Vec { vec![] } - fn provided_trait_methods<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) - -> Vec>> { bug!("provided_trait_methods") } - fn trait_item_def_ids(&self, def: DefId) - -> Vec { bug!("trait_item_def_ids") } + fn implementations_of_trait(&self, filter: Option) -> Vec { vec![] } fn def_index_for_def_key(&self, - cnum: ast::CrateNum, + cnum: CrateNum, def: DefKey) -> Option { None } // impl info - fn impl_items(&self, impl_def_id: DefId) -> Vec - { bug!("impl_items") } + fn impl_or_trait_items(&self, def_id: DefId) -> Vec + { bug!("impl_or_trait_items") } fn impl_trait_ref<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> Option> { bug!("impl_trait_ref") } - fn impl_polarity(&self, def: DefId) -> Option { bug!("impl_polarity") } + fn impl_polarity(&self, def: DefId) -> hir::ImplPolarity { bug!("impl_polarity") } fn custom_coerce_unsized_kind(&self, def: DefId) -> Option { bug!("custom_coerce_unsized_kind") } - fn associated_consts<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) - -> Vec>> { bug!("associated_consts") } fn impl_parent(&self, def: DefId) -> Option { bug!("impl_parent") } // trait/impl-item info - fn trait_of_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) - -> Option { bug!("trait_of_item") } + fn trait_of_item(&self, def_id: DefId) -> Option { bug!("trait_of_item") } fn impl_or_trait_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> Option> { bug!("impl_or_trait_item") } // flags fn is_const_fn(&self, did: DefId) -> bool { bug!("is_const_fn") } fn is_defaulted_trait(&self, did: DefId) -> bool { bug!("is_defaulted_trait") } - fn is_impl(&self, did: DefId) -> bool { bug!("is_impl") } fn is_default_impl(&self, impl_did: DefId) -> bool { bug!("is_default_impl") } fn is_extern_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, did: DefId) -> bool { bug!("is_extern_item") } fn is_foreign_item(&self, did: DefId) -> bool { bug!("is_foreign_item") } - fn is_static_method(&self, did: DefId) -> bool { bug!("is_static_method") } fn is_statically_included_foreign_item(&self, id: ast::NodeId) -> bool { false } - fn is_typedef(&self, did: DefId) -> bool { bug!("is_typedef") } // crate metadata - fn dylib_dependency_formats(&self, cnum: ast::CrateNum) - -> Vec<(ast::CrateNum, LinkagePreference)> + fn dylib_dependency_formats(&self, cnum: CrateNum) + -> Vec<(CrateNum, LinkagePreference)> { bug!("dylib_dependency_formats") } - fn lang_items(&self, cnum: ast::CrateNum) -> Vec<(DefIndex, usize)> + fn lang_items(&self, cnum: CrateNum) -> Vec<(DefIndex, usize)> { bug!("lang_items") } - fn missing_lang_items(&self, cnum: ast::CrateNum) -> Vec + fn missing_lang_items(&self, cnum: CrateNum) -> Vec { bug!("missing_lang_items") } - fn is_staged_api(&self, cnum: ast::CrateNum) -> bool { bug!("is_staged_api") } - fn is_explicitly_linked(&self, cnum: ast::CrateNum) -> bool { bug!("is_explicitly_linked") } - fn is_allocator(&self, cnum: ast::CrateNum) -> bool { bug!("is_allocator") } - fn is_panic_runtime(&self, cnum: ast::CrateNum) -> bool { bug!("is_panic_runtime") } - fn panic_strategy(&self, cnum: ast::CrateNum) -> PanicStrategy { + fn is_staged_api(&self, cnum: CrateNum) -> bool { bug!("is_staged_api") } + fn is_explicitly_linked(&self, cnum: CrateNum) -> bool { bug!("is_explicitly_linked") } + fn is_allocator(&self, cnum: CrateNum) -> bool { bug!("is_allocator") } + fn is_panic_runtime(&self, 
cnum: CrateNum) -> bool { bug!("is_panic_runtime") } + fn is_compiler_builtins(&self, cnum: CrateNum) -> bool { bug!("is_compiler_builtins") } + fn panic_strategy(&self, cnum: CrateNum) -> PanicStrategy { bug!("panic_strategy") } - fn extern_crate(&self, cnum: ast::CrateNum) -> Option { bug!("extern_crate") } - fn crate_attrs(&self, cnum: ast::CrateNum) -> Vec - { bug!("crate_attrs") } - fn crate_name(&self, cnum: ast::CrateNum) -> InternedString { bug!("crate_name") } - fn original_crate_name(&self, cnum: ast::CrateNum) -> InternedString { + fn extern_crate(&self, cnum: CrateNum) -> Option { bug!("extern_crate") } + fn crate_name(&self, cnum: CrateNum) -> InternedString { bug!("crate_name") } + fn original_crate_name(&self, cnum: CrateNum) -> InternedString { bug!("original_crate_name") } - fn crate_hash(&self, cnum: ast::CrateNum) -> Svh { bug!("crate_hash") } - fn crate_disambiguator(&self, cnum: ast::CrateNum) + fn crate_hash(&self, cnum: CrateNum) -> Svh { bug!("crate_hash") } + fn crate_disambiguator(&self, cnum: CrateNum) -> InternedString { bug!("crate_disambiguator") } - fn crate_struct_field_attrs(&self, cnum: ast::CrateNum) - -> FnvHashMap> - { bug!("crate_struct_field_attrs") } - fn plugin_registrar_fn(&self, cnum: ast::CrateNum) -> Option + fn plugin_registrar_fn(&self, cnum: CrateNum) -> Option { bug!("plugin_registrar_fn") } - fn native_libraries(&self, cnum: ast::CrateNum) -> Vec<(NativeLibraryKind, String)> + fn native_libraries(&self, cnum: CrateNum) -> Vec<(NativeLibraryKind, String)> { bug!("native_libraries") } - fn reachable_ids(&self, cnum: ast::CrateNum) -> Vec { bug!("reachable_ids") } + fn reachable_ids(&self, cnum: CrateNum) -> Vec { bug!("reachable_ids") } + fn is_no_builtins(&self, cnum: CrateNum) -> bool { bug!("is_no_builtins") } // resolve fn def_key(&self, def: DefId) -> hir_map::DefKey { bug!("def_key") } - fn relative_def_path(&self, def: DefId) -> hir_map::DefPath { bug!("relative_def_path") } - fn variant_kind(&self, def_id: DefId) -> Option { bug!("variant_kind") } + fn relative_def_path(&self, def: DefId) -> Option { + bug!("relative_def_path") + } + fn variant_kind(&self, def_id: DefId) -> Option { bug!("variant_kind") } fn struct_ctor_def_id(&self, struct_def_id: DefId) -> Option { bug!("struct_ctor_def_id") } - fn tuple_struct_definition_if_ctor(&self, did: DefId) -> Option - { bug!("tuple_struct_definition_if_ctor") } fn struct_field_names(&self, def: DefId) -> Vec { bug!("struct_field_names") } - fn item_children(&self, did: DefId) -> Vec { bug!("item_children") } - fn crate_top_level_items(&self, cnum: ast::CrateNum) -> Vec - { bug!("crate_top_level_items") } + fn item_children(&self, did: DefId) -> Vec { bug!("item_children") } // misc. metadata fn maybe_get_item_ast<'a>(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) @@ -462,184 +404,32 @@ impl<'tcx> CrateStore<'tcx> for DummyCrateStore { // This is basically a 1-based range of ints, which is a little // silly - I may fix that. 
- fn crates(&self) -> Vec { vec![] } + fn crates(&self) -> Vec { vec![] } fn used_libraries(&self) -> Vec<(String, NativeLibraryKind)> { vec![] } fn used_link_args(&self) -> Vec { vec![] } // utility functions fn metadata_filename(&self) -> &str { bug!("metadata_filename") } fn metadata_section_name(&self, target: &Target) -> &str { bug!("metadata_section_name") } - fn encode_type<'a>(&self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>, - def_id_to_string: for<'b> fn(TyCtxt<'b, 'tcx, 'tcx>, DefId) -> String) - -> Vec { - bug!("encode_type") - } - fn used_crates(&self, prefer: LinkagePreference) -> Vec<(ast::CrateNum, Option)> + fn used_crates(&self, prefer: LinkagePreference) -> Vec<(CrateNum, Option)> { vec![] } - fn used_crate_source(&self, cnum: ast::CrateNum) -> CrateSource { bug!("used_crate_source") } - fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option { None } + fn used_crate_source(&self, cnum: CrateNum) -> CrateSource { bug!("used_crate_source") } + fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option { None } fn encode_metadata<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, reexports: &def::ExportMap, link_meta: &LinkMeta, reachable: &NodeSet, - mir_map: &MirMap<'tcx>, - krate: &hir::Crate) -> Vec { vec![] } + mir_map: &MirMap<'tcx>) -> Vec { vec![] } fn metadata_encoding_version(&self) -> &[u8] { bug!("metadata_encoding_version") } } +pub enum LoadedMacro { + Def(ast::MacroDef), + CustomDerive(String, Rc), +} -/// Metadata encoding and decoding can make use of thread-local encoding and -/// decoding contexts. These allow implementers of serialize::Encodable and -/// Decodable to access information and datastructures that would otherwise not -/// be available to them. For example, we can automatically translate def-id and -/// span information during decoding because the decoding context knows which -/// crate the data is decoded from. Or it allows to make ty::Ty decodable -/// because the context has access to the TyCtxt that is needed for creating -/// ty::Ty instances. -/// -/// Note, however, that this only works for RBML-based encoding and decoding at -/// the moment. -pub mod tls { - use rbml::opaque::Encoder as OpaqueEncoder; - use rbml::opaque::Decoder as OpaqueDecoder; - use serialize; - use std::cell::Cell; - use std::mem; - use ty::{self, Ty, TyCtxt}; - use ty::subst::Substs; - use hir::def_id::DefId; - - pub trait EncodingContext<'tcx> { - fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>; - fn encode_ty(&self, encoder: &mut OpaqueEncoder, t: Ty<'tcx>); - fn encode_substs(&self, encoder: &mut OpaqueEncoder, substs: &Substs<'tcx>); - } - - /// Marker type used for the TLS slot. - /// The type context cannot be used directly because the TLS - /// in libstd doesn't allow types generic over lifetimes. - struct TlsPayload; - - thread_local! { - static TLS_ENCODING: Cell> = Cell::new(None) - } - - /// Execute f after pushing the given EncodingContext onto the TLS stack. - pub fn enter_encoding_context<'tcx, F, R>(ecx: &EncodingContext<'tcx>, - encoder: &mut OpaqueEncoder, - f: F) -> R - where F: FnOnce(&EncodingContext<'tcx>, &mut OpaqueEncoder) -> R - { - let tls_payload = (ecx as *const _, encoder as *mut _); - let tls_ptr = &tls_payload as *const _ as *const TlsPayload; - TLS_ENCODING.with(|tls| { - let prev = tls.get(); - tls.set(Some(tls_ptr)); - let ret = f(ecx, encoder); - tls.set(prev); - return ret - }) - } - - /// Execute f with access to the thread-local encoding context and - /// rbml encoder. 
This function will panic if the encoder passed in and the - /// context encoder are not the same. - /// - /// Note that this method is 'practically' safe due to its checking that the - /// encoder passed in is the same as the one in TLS, but it would still be - /// possible to construct cases where the EncodingContext is exchanged - /// while the same encoder is used, thus working with a wrong context. - pub fn with_encoding_context<'tcx, E, F, R>(encoder: &mut E, f: F) -> R - where F: FnOnce(&EncodingContext<'tcx>, &mut OpaqueEncoder) -> R, - E: serialize::Encoder - { - unsafe { - unsafe_with_encoding_context(|ecx, tls_encoder| { - assert!(encoder as *mut _ as usize == tls_encoder as *mut _ as usize); - - let ecx: &EncodingContext<'tcx> = mem::transmute(ecx); - - f(ecx, tls_encoder) - }) - } - } - - /// Execute f with access to the thread-local encoding context and - /// rbml encoder. - pub unsafe fn unsafe_with_encoding_context(f: F) -> R - where F: FnOnce(&EncodingContext, &mut OpaqueEncoder) -> R - { - TLS_ENCODING.with(|tls| { - let tls = tls.get().unwrap(); - let tls_payload = tls as *mut (&EncodingContext, &mut OpaqueEncoder); - f((*tls_payload).0, (*tls_payload).1) - }) - } - - pub trait DecodingContext<'tcx> { - fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>; - fn decode_ty(&self, decoder: &mut OpaqueDecoder) -> ty::Ty<'tcx>; - fn decode_substs(&self, decoder: &mut OpaqueDecoder) -> Substs<'tcx>; - fn translate_def_id(&self, def_id: DefId) -> DefId; - } - - thread_local! { - static TLS_DECODING: Cell> = Cell::new(None) - } - - /// Execute f after pushing the given DecodingContext onto the TLS stack. - pub fn enter_decoding_context<'tcx, F, R>(dcx: &DecodingContext<'tcx>, - decoder: &mut OpaqueDecoder, - f: F) -> R - where F: FnOnce(&DecodingContext<'tcx>, &mut OpaqueDecoder) -> R - { - let tls_payload = (dcx as *const _, decoder as *mut _); - let tls_ptr = &tls_payload as *const _ as *const TlsPayload; - TLS_DECODING.with(|tls| { - let prev = tls.get(); - tls.set(Some(tls_ptr)); - let ret = f(dcx, decoder); - tls.set(prev); - return ret - }) - } - - /// Execute f with access to the thread-local decoding context and - /// rbml decoder. This function will panic if the decoder passed in and the - /// context decoder are not the same. - /// - /// Note that this method is 'practically' safe due to its checking that the - /// decoder passed in is the same as the one in TLS, but it would still be - /// possible to construct cases where the DecodingContext is exchanged - /// while the same decoder is used, thus working with a wrong context. - pub fn with_decoding_context<'decoder, 'tcx, D, F, R>(d: &'decoder mut D, f: F) -> R - where D: serialize::Decoder, - F: FnOnce(&DecodingContext<'tcx>, - &mut OpaqueDecoder) -> R, - 'tcx: 'decoder - { - unsafe { - unsafe_with_decoding_context(|dcx, decoder| { - assert!((d as *mut _ as usize) == (decoder as *mut _ as usize)); - - let dcx: &DecodingContext<'tcx> = mem::transmute(dcx); - - f(dcx, decoder) - }) - } - } - - /// Execute f with access to the thread-local decoding context and - /// rbml decoder. 
- pub unsafe fn unsafe_with_decoding_context(f: F) -> R - where F: FnOnce(&DecodingContext, &mut OpaqueDecoder) -> R - { - TLS_DECODING.with(|tls| { - let tls = tls.get().unwrap(); - let tls_payload = tls as *mut (&DecodingContext, &mut OpaqueDecoder); - f((*tls_payload).0, (*tls_payload).1) - }) - } +pub trait CrateLoader { + fn load_macros(&mut self, extern_crate: &ast::Item, allows_macros: bool) -> Vec; + fn process_item(&mut self, item: &ast::Item, defs: &Definitions); + fn postprocess(&mut self, krate: &ast::Crate); } diff --git a/src/librustc/middle/dataflow.rs b/src/librustc/middle/dataflow.rs index fc1294c86c..7f3a58808c 100644 --- a/src/librustc/middle/dataflow.rs +++ b/src/librustc/middle/dataflow.rs @@ -112,10 +112,10 @@ impl<'a, 'tcx, O:DataFlowOperator> pprust::PpAnn for DataFlowContext<'a, 'tcx, O ps: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> { let id = match node { - pprust::NodeName(_) => 0, + pprust::NodeName(_) => ast::CRATE_NODE_ID, pprust::NodeExpr(expr) => expr.id, pprust::NodeBlock(blk) => blk.id, - pprust::NodeItem(_) | pprust::NodeSubItem(_) => 0, + pprust::NodeItem(_) | pprust::NodeSubItem(_) => ast::CRATE_NODE_ID, pprust::NodePat(pat) => pat.id }; diff --git a/src/librustc/middle/dead.rs b/src/librustc/middle/dead.rs index 2b59e603cc..30a0c6a9dc 100644 --- a/src/librustc/middle/dead.rs +++ b/src/librustc/middle/dead.rs @@ -22,8 +22,8 @@ use ty::{self, TyCtxt}; use hir::def::Def; use hir::def_id::{DefId}; use lint; +use util::nodemap::FnvHashSet; -use std::collections::HashSet; use syntax::{ast, codemap}; use syntax::attr; use syntax_pos; @@ -48,7 +48,7 @@ fn should_explore<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, struct MarkSymbolVisitor<'a, 'tcx: 'a> { worklist: Vec, tcx: TyCtxt<'a, 'tcx, 'tcx>, - live_symbols: Box>, + live_symbols: Box>, struct_has_extern_repr: bool, ignore_non_const_paths: bool, inherited_pub_visibility: bool, @@ -61,7 +61,7 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { MarkSymbolVisitor { worklist: worklist, tcx: tcx, - live_symbols: box HashSet::new(), + live_symbols: box FnvHashSet(), struct_has_extern_repr: false, ignore_non_const_paths: false, inherited_pub_visibility: false, @@ -86,17 +86,21 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { } fn lookup_and_handle_definition(&mut self, id: ast::NodeId) { - use ty::TypeVariants::{TyEnum, TyStruct}; + let def = self.tcx.expect_def(id); // If `bar` is a trait item, make sure to mark Foo as alive in `Foo::bar` - self.tcx.tables.borrow().item_substs.get(&id) - .and_then(|substs| substs.substs.self_ty()) - .map(|ty| match ty.sty { - TyEnum(tyid, _) | TyStruct(tyid, _) => self.check_def_id(tyid.did), - _ => (), - }); + match def { + Def::AssociatedTy(..) | Def::Method(_) | Def::AssociatedConst(_) + if self.tcx.trait_of_item(def.def_id()).is_some() => { + if let Some(substs) = self.tcx.tables.borrow().item_substs.get(&id) { + if let ty::TyAdt(tyid, _) = substs.substs.type_at(0).sty { + self.check_def_id(tyid.did); + } + } + } + _ => {} + } - let def = self.tcx.expect_def(id); match def { Def::Const(_) | Def::AssociatedConst(..) => { self.check_def_id(def.def_id()); @@ -104,8 +108,10 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { _ if self.ignore_non_const_paths => (), Def::PrimTy(_) => (), Def::SelfTy(..) 
=> (), - Def::Variant(enum_id, variant_id) => { - self.check_def_id(enum_id); + Def::Variant(variant_id) => { + if let Some(enum_id) = self.tcx.parent_def_id(variant_id) { + self.check_def_id(enum_id); + } if !self.ignore_variant_stack.contains(&variant_id) { self.check_def_id(variant_id); } @@ -123,23 +129,28 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { } fn handle_field_access(&mut self, lhs: &hir::Expr, name: ast::Name) { - if let ty::TyStruct(def, _) = self.tcx.expr_ty_adjusted(lhs).sty { - self.insert_def_id(def.struct_variant().field_named(name).did); - } else { - span_bug!(lhs.span, "named field access on non-struct") + match self.tcx.expr_ty_adjusted(lhs).sty { + ty::TyAdt(def, _) => { + self.insert_def_id(def.struct_variant().field_named(name).did); + } + _ => span_bug!(lhs.span, "named field access on non-ADT"), } } fn handle_tup_field_access(&mut self, lhs: &hir::Expr, idx: usize) { - if let ty::TyStruct(def, _) = self.tcx.expr_ty_adjusted(lhs).sty { - self.insert_def_id(def.struct_variant().fields[idx].did); + match self.tcx.expr_ty_adjusted(lhs).sty { + ty::TyAdt(def, _) => { + self.insert_def_id(def.struct_variant().fields[idx].did); + } + ty::TyTuple(..) => {} + _ => span_bug!(lhs.span, "numeric field access on non-ADT"), } } fn handle_field_pattern_match(&mut self, lhs: &hir::Pat, pats: &[codemap::Spanned]) { let variant = match self.tcx.node_id_to_type(lhs.id).sty { - ty::TyStruct(adt, _) | ty::TyEnum(adt, _) => { + ty::TyAdt(adt, _) => { adt.variant_of_def(self.tcx.expect_def(lhs.id)) } _ => span_bug!(lhs.span, "non-ADT in struct pattern") @@ -153,7 +164,7 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { } fn mark_live_symbols(&mut self) { - let mut scanned = HashSet::new(); + let mut scanned = FnvHashSet(); while !self.worklist.is_empty() { let id = self.worklist.pop().unwrap(); if scanned.contains(&id) { @@ -176,7 +187,7 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { match *node { ast_map::NodeItem(item) => { match item.node { - hir::ItemStruct(..) => { + hir::ItemStruct(..) | hir::ItemUnion(..) 
=> { self.struct_has_extern_repr = item.attrs.iter().any(|attr| { attr::find_repr_attrs(self.tcx.sess.diagnostic(), attr) .contains(&attr::ReprExtern) @@ -285,7 +296,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for MarkSymbolVisitor<'a, 'tcx> { } fn visit_path_list_item(&mut self, path: &hir::Path, item: &hir::PathListItem) { - self.lookup_and_handle_definition(item.node.id()); + self.lookup_and_handle_definition(item.node.id); intravisit::walk_path_list_item(self, path, item); } } @@ -334,7 +345,7 @@ impl<'v> Visitor<'v> for LifeSeeder { self.worklist.extend(enum_def.variants.iter() .map(|variant| variant.node.data.id())); } - hir::ItemTrait(_, _, _, ref trait_items) => { + hir::ItemTrait(.., ref trait_items) => { for trait_item in trait_items { match trait_item.node { hir::ConstTraitItem(_, Some(_)) | @@ -347,7 +358,7 @@ impl<'v> Visitor<'v> for LifeSeeder { } } } - hir::ItemImpl(_, _, _, ref opt_trait, _, ref impl_items) => { + hir::ItemImpl(.., ref opt_trait, _, ref impl_items) => { for impl_item in impl_items { if opt_trait.is_some() || has_allow_dead_code_or_lang_attr(&impl_item.attrs) { @@ -386,7 +397,7 @@ fn create_and_seed_worklist<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn find_live<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, access_levels: &privacy::AccessLevels, krate: &hir::Crate) - -> Box> { + -> Box> { let worklist = create_and_seed_worklist(tcx, access_levels, krate); let mut symbol_visitor = MarkSymbolVisitor::new(tcx, worklist); symbol_visitor.mark_live_symbols(); @@ -404,7 +415,7 @@ fn get_struct_ctor_id(item: &hir::Item) -> Option { struct DeadVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, - live_symbols: Box>, + live_symbols: Box>, } impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { @@ -414,7 +425,8 @@ impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { | hir::ItemConst(..) | hir::ItemFn(..) | hir::ItemEnum(..) - | hir::ItemStruct(..) => true, + | hir::ItemStruct(..) + | hir::ItemUnion(..) => true, _ => false }; let ctor_id = get_struct_ctor_id(item); @@ -460,13 +472,12 @@ impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { // This is done to handle the case where, for example, the static // method of a private type is used, but the type itself is never // called directly. - let impl_items = self.tcx.impl_items.borrow(); + let impl_items = self.tcx.impl_or_trait_item_def_ids.borrow(); if let Some(impl_list) = self.tcx.inherent_impls.borrow().get(&self.tcx.map.local_def_id(id)) { for impl_did in impl_list.iter() { - for item_did in impl_items.get(impl_did).unwrap().iter() { - if let Some(item_node_id) = - self.tcx.map.as_local_node_id(item_did.def_id()) { + for &item_did in &impl_items[impl_did][..] { + if let Some(item_node_id) = self.tcx.map.as_local_node_id(item_did) { if self.live_symbols.contains(&item_node_id) { return true; } @@ -537,7 +548,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for DeadVisitor<'a, 'tcx> { fn visit_struct_field(&mut self, field: &hir::StructField) { if self.should_warn_about_field(&field) { self.warn_dead_code(field.id, field.span, - field.name, "struct field"); + field.name, "field"); } intravisit::walk_struct_field(self, field); diff --git a/src/librustc/middle/dependency_format.rs b/src/librustc/middle/dependency_format.rs index cf6905ecf4..c6908e11ed 100644 --- a/src/librustc/middle/dependency_format.rs +++ b/src/librustc/middle/dependency_format.rs @@ -61,7 +61,7 @@ //! Additionally, the algorithm is geared towards finding *any* solution rather //! than finding a number of solutions (there are normally quite a few). 
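The dependency_format.rs hunks that follow keep the numbering quirk called out in the comments above (upstream crate numbers start at 1, with 0 reserved for the local crate) but route it through the new CrateNum API instead of raw casts. A small sketch of how the 1-based numbers map onto the 0-based dependency list, reusing the CrateNum sketch shown earlier and a simplified Linkage enum whose four variants are the ones visible in the hunks:

    #[derive(Copy, Clone, PartialEq, Debug)]
    enum Linkage { NotLinked, IncludedFromDylib, Static, Dynamic }

    // One slot per upstream crate: crate number n lives at index n - 1,
    // matching the `(1..last_crate + 1)` loops and the `cnum.as_usize() - 1`
    // indexing in the code below.
    fn initial_list(last_crate: usize) -> Vec<Linkage> {
        (1..last_crate + 1).map(|_| Linkage::NotLinked).collect()
    }

    fn set_linkage(list: &mut [Linkage], cnum: CrateNum, link: Linkage) {
        list[cnum.as_usize() - 1] = link;
    }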
-use syntax::ast; +use hir::def_id::CrateNum; use session; use session::config::{self, PanicStrategy}; @@ -139,8 +139,13 @@ fn calculate_type(sess: &session::Session, } } - // Everything else falls through below - config::CrateTypeExecutable | config::CrateTypeDylib => {}, + // Everything else falls through below. This will happen either with the + // `-C prefer-dynamic` or because we're a rustc-macro crate. Note that + // rustc-macro crates are required to be dylibs, and they're currently + // required to link to libsyntax as well. + config::CrateTypeExecutable | + config::CrateTypeDylib | + config::CrateTypeRustcMacro => {}, } let mut formats = FnvHashMap(); @@ -164,9 +169,9 @@ fn calculate_type(sess: &session::Session, } // Collect what we've got so far in the return vector. - let last_crate = sess.cstore.crates().len() as ast::CrateNum; + let last_crate = sess.cstore.crates().len(); let mut ret = (1..last_crate+1).map(|cnum| { - match formats.get(&cnum) { + match formats.get(&CrateNum::new(cnum)) { Some(&RequireDynamic) => Linkage::Dynamic, Some(&RequireStatic) => Linkage::IncludedFromDylib, None => Linkage::NotLinked, @@ -186,7 +191,7 @@ fn calculate_type(sess: &session::Session, assert!(src.rlib.is_some()); info!("adding staticlib: {}", sess.cstore.crate_name(cnum)); add_library(sess, cnum, RequireStatic, &mut formats); - ret[cnum as usize - 1] = Linkage::Static; + ret[cnum.as_usize() - 1] = Linkage::Static; } } @@ -208,7 +213,7 @@ fn calculate_type(sess: &session::Session, // For situations like this, we perform one last pass over the dependencies, // making sure that everything is available in the requested format. for (cnum, kind) in ret.iter().enumerate() { - let cnum = (cnum + 1) as ast::CrateNum; + let cnum = CrateNum::new(cnum + 1); let src = sess.cstore.used_crate_source(cnum); match *kind { Linkage::NotLinked | @@ -232,9 +237,9 @@ fn calculate_type(sess: &session::Session, } fn add_library(sess: &session::Session, - cnum: ast::CrateNum, + cnum: CrateNum, link: LinkagePreference, - m: &mut FnvHashMap) { + m: &mut FnvHashMap) { match m.get(&cnum) { Some(&link2) => { // If the linkages differ, then we'd have two copies of the library @@ -264,9 +269,9 @@ fn attempt_static(sess: &session::Session) -> Option { // All crates are available in an rlib format, so we're just going to link // everything in explicitly so long as it's actually required. - let last_crate = sess.cstore.crates().len() as ast::CrateNum; + let last_crate = sess.cstore.crates().len(); let mut ret = (1..last_crate+1).map(|cnum| { - if sess.cstore.is_explicitly_linked(cnum) { + if sess.cstore.is_explicitly_linked(CrateNum::new(cnum)) { Linkage::Static } else { Linkage::NotLinked @@ -293,11 +298,11 @@ fn attempt_static(sess: &session::Session) -> Option { // a required dependency) in one of the session's field. If this field is not // set then this compilation doesn't actually need the dependency and we can // also skip this step entirely. 
-fn activate_injected_dep(injected: Option, +fn activate_injected_dep(injected: Option, list: &mut DependencyList, - replaces_injected: &Fn(ast::CrateNum) -> bool) { + replaces_injected: &Fn(CrateNum) -> bool) { for (i, slot) in list.iter().enumerate() { - let cnum = (i + 1) as ast::CrateNum; + let cnum = CrateNum::new(i + 1); if !replaces_injected(cnum) { continue } @@ -306,7 +311,7 @@ fn activate_injected_dep(injected: Option, } } if let Some(injected) = injected { - let idx = injected as usize - 1; + let idx = injected.as_usize() - 1; assert_eq!(list[idx], Linkage::NotLinked); list[idx] = Linkage::Static; } @@ -324,7 +329,7 @@ fn verify_ok(sess: &session::Session, list: &[Linkage]) { if let Linkage::NotLinked = *linkage { continue } - let cnum = (i + 1) as ast::CrateNum; + let cnum = CrateNum::new(i + 1); if sess.cstore.is_allocator(cnum) { if let Some(prev) = allocator { let prev_name = sess.cstore.crate_name(prev); @@ -375,7 +380,7 @@ fn verify_ok(sess: &session::Session, list: &[Linkage]) { if desired_strategy == PanicStrategy::Abort { continue } - let cnum = (i + 1) as ast::CrateNum; + let cnum = CrateNum::new(i + 1); let found_strategy = sess.cstore.panic_strategy(cnum); if desired_strategy == found_strategy { continue diff --git a/src/librustc/middle/effect.rs b/src/librustc/middle/effect.rs index 3ca6cf0399..2a75b6620f 100644 --- a/src/librustc/middle/effect.rs +++ b/src/librustc/middle/effect.rs @@ -13,15 +13,15 @@ use self::RootUnsafeContext::*; use dep_graph::DepNode; -use hir::def::Def; use ty::{self, Ty, TyCtxt}; use ty::MethodCall; +use lint; use syntax::ast; use syntax_pos::Span; -use hir; -use hir::intravisit; -use hir::intravisit::{FnKind, Visitor}; +use hir::{self, PatKind}; +use hir::def::Def; +use hir::intravisit::{self, FnKind, Visitor}; #[derive(Copy, Clone)] struct UnsafeContext { @@ -44,7 +44,7 @@ enum RootUnsafeContext { fn type_is_unsafe_function(ty: Ty) -> bool { match ty.sty { - ty::TyFnDef(_, _, ref f) | + ty::TyFnDef(.., ref f) | ty::TyFnPtr(ref f) => f.unsafety == hir::Unsafety::Unsafe, _ => false, } @@ -58,16 +58,25 @@ struct EffectCheckVisitor<'a, 'tcx: 'a> { } impl<'a, 'tcx> EffectCheckVisitor<'a, 'tcx> { - fn require_unsafe(&mut self, span: Span, description: &str) { + fn require_unsafe_ext(&mut self, node_id: ast::NodeId, span: Span, + description: &str, is_lint: bool) { if self.unsafe_context.push_unsafe_count > 0 { return; } match self.unsafe_context.root { SafeContext => { - // Report an error. - struct_span_err!( - self.tcx.sess, span, E0133, - "{} requires unsafe function or block", description) - .span_label(span, &format!("unsafe call requires unsafe function or block")) - .emit(); + if is_lint { + self.tcx.sess.add_lint(lint::builtin::SAFE_EXTERN_STATICS, + node_id, + span, + format!("{} requires unsafe function or \ + block (error E0133)", description)); + } else { + // Report an error. + struct_span_err!( + self.tcx.sess, span, E0133, + "{} requires unsafe function or block", description) + .span_label(span, &description) + .emit(); + } } UnsafeBlock(block_id) => { // OK, but record this. 
@@ -77,6 +86,10 @@ impl<'a, 'tcx> EffectCheckVisitor<'a, 'tcx> { UnsafeFn => {} } } + + fn require_unsafe(&mut self, span: Span, description: &str) { + self.require_unsafe_ext(ast::DUMMY_NODE_ID, span, description, false) + } } impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> { @@ -84,9 +97,9 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> { block: &'v hir::Block, span: Span, id: ast::NodeId) { let (is_item_fn, is_unsafe_fn) = match fn_kind { - FnKind::ItemFn(_, _, unsafety, _, _, _, _) => + FnKind::ItemFn(_, _, unsafety, ..) => (true, unsafety == hir::Unsafety::Unsafe), - FnKind::Method(_, sig, _, _) => + FnKind::Method(_, sig, ..) => (true, sig.unsafety == hir::Unsafety::Unsafe), _ => (false, false), }; @@ -144,7 +157,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> { fn visit_expr(&mut self, expr: &hir::Expr) { match expr.node { - hir::ExprMethodCall(_, _, _) => { + hir::ExprMethodCall(..) => { let method_call = MethodCall::expr(expr.id); let base_type = self.tcx.tables.borrow().method_map[&method_call].ty; debug!("effect: method call case, base type is {:?}", @@ -174,8 +187,23 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> { self.require_unsafe(expr.span, "use of inline assembly"); } hir::ExprPath(..) => { - if let Def::Static(_, true) = self.tcx.expect_def(expr.id) { - self.require_unsafe(expr.span, "use of mutable static"); + if let Def::Static(def_id, mutbl) = self.tcx.expect_def(expr.id) { + if mutbl { + self.require_unsafe(expr.span, "use of mutable static"); + } else if match self.tcx.map.get_if_local(def_id) { + Some(hir::map::NodeForeignItem(..)) => true, + Some(..) => false, + None => self.tcx.sess.cstore.is_foreign_item(def_id), + } { + self.require_unsafe_ext(expr.id, expr.span, "use of extern static", true); + } + } + } + hir::ExprField(ref base_expr, field) => { + if let ty::TyAdt(adt, ..) = self.tcx.expr_ty_adjusted(base_expr).sty { + if adt.is_union() { + self.require_unsafe(field.span, "access to union field"); + } } } _ => {} @@ -183,6 +211,20 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> { intravisit::walk_expr(self, expr); } + + fn visit_pat(&mut self, pat: &hir::Pat) { + if let PatKind::Struct(_, ref fields, _) = pat.node { + if let ty::TyAdt(adt, ..) = self.tcx.pat_ty(pat).sty { + if adt.is_union() { + for field in fields { + self.require_unsafe(field.span, "matching on union field"); + } + } + } + } + + intravisit::walk_pat(self, pat); + } } pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { diff --git a/src/librustc/middle/expr_use_visitor.rs b/src/librustc/middle/expr_use_visitor.rs index 87463055a2..5b5c3da8f0 100644 --- a/src/librustc/middle/expr_use_visitor.rs +++ b/src/librustc/middle/expr_use_visitor.rs @@ -76,7 +76,7 @@ pub trait Delegate<'tcx> { borrow_id: ast::NodeId, borrow_span: Span, cmt: mc::cmt<'tcx>, - loan_region: ty::Region, + loan_region: &'tcx ty::Region, bk: ty::BorrowKind, loan_cause: LoanCause); @@ -301,11 +301,11 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { for arg in &decl.inputs { let arg_ty = return_if_err!(self.mc.infcx.node_ty(arg.pat.id)); - let fn_body_scope = self.tcx().region_maps.node_extent(body.id); + let fn_body_scope_r = self.tcx().node_scope_region(body.id); let arg_cmt = self.mc.cat_rvalue( arg.id, arg.pat.span, - ty::ReScope(fn_body_scope), // Args live only as long as the fn body. + fn_body_scope_r, // Args live only as long as the fn body. 
arg_ty); self.walk_irrefutable_pat(arg_cmt, &arg.pat); @@ -352,7 +352,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { fn borrow_expr(&mut self, expr: &hir::Expr, - r: ty::Region, + r: &'tcx ty::Region, bk: ty::BorrowKind, cause: LoanCause) { debug!("borrow_expr(expr={:?}, r={:?}, bk={:?})", @@ -409,12 +409,12 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { self.consume_exprs(args); } - hir::ExprMethodCall(_, _, ref args) => { // callee.m(args) + hir::ExprMethodCall(.., ref args) => { // callee.m(args) self.consume_exprs(args); } hir::ExprStruct(_, ref fields, ref opt_with) => { - self.walk_struct_expr(expr, fields, opt_with); + self.walk_struct_expr(fields, opt_with); } hir::ExprTup(ref exprs) => { @@ -431,7 +431,8 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { hir::ExprMatch(ref discr, ref arms, _) => { let discr_cmt = return_if_err!(self.mc.cat_expr(&discr)); - self.borrow_expr(&discr, ty::ReEmpty, ty::ImmBorrow, MatchDiscriminant); + let r = self.tcx().mk_region(ty::ReEmpty); + self.borrow_expr(&discr, r, ty::ImmBorrow, MatchDiscriminant); // treatment of the discriminant is handled while walking the arms. for arm in arms { @@ -449,7 +450,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { // make sure that the thing we are pointing out stays valid // for the lifetime `scope_r` of the resulting ptr: let expr_ty = return_if_err!(self.mc.infcx.node_ty(expr.id)); - if let ty::TyRef(&r, _) = expr_ty.sty { + if let ty::TyRef(r, _) = expr_ty.sty { let bk = ty::BorrowKind::from_mutbl(m); self.borrow_expr(&base, r, bk, AddrOf); } @@ -543,7 +544,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { self.consume_expr(&count); } - hir::ExprClosure(_, _, _, fn_decl_span) => { + hir::ExprClosure(.., fn_decl_span) => { self.walk_captures(expr, fn_decl_span) } @@ -557,7 +558,6 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { let callee_ty = return_if_err!(self.mc.infcx.expr_ty_adjusted(callee)); debug!("walk_callee: callee={:?} callee_ty={:?}", callee, callee_ty); - let call_scope = self.tcx().region_maps.node_extent(call.id); match callee_ty.sty { ty::TyFnDef(..) | ty::TyFnPtr(_) => { self.consume_expr(callee); @@ -578,14 +578,16 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { }; match overloaded_call_type { FnMutOverloadedCall => { + let call_scope_r = self.tcx().node_scope_region(call.id); self.borrow_expr(callee, - ty::ReScope(call_scope), + call_scope_r, ty::MutBorrow, ClosureInvocation); } FnOverloadedCall => { + let call_scope_r = self.tcx().node_scope_region(call.id); self.borrow_expr(callee, - ty::ReScope(call_scope), + call_scope_r, ty::ImmBorrow, ClosureInvocation); } @@ -653,7 +655,6 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { } fn walk_struct_expr(&mut self, - _expr: &hir::Expr, fields: &[hir::Field], opt_with: &Option>) { // Consume the expressions supplying values for each field. @@ -670,30 +671,33 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { // Select just those fields of the `with` // expression that will actually be used - if let ty::TyStruct(def, substs) = with_cmt.ty.sty { - // Consume those fields of the with expression that are needed. 
- for with_field in &def.struct_variant().fields { - if !contains_field_named(with_field, fields) { - let cmt_field = self.mc.cat_field( - &*with_expr, - with_cmt.clone(), - with_field.name, - with_field.ty(self.tcx(), substs) - ); - self.delegate_consume(with_expr.id, with_expr.span, cmt_field); + match with_cmt.ty.sty { + ty::TyAdt(adt, substs) if adt.is_struct() => { + // Consume those fields of the with expression that are needed. + for with_field in &adt.struct_variant().fields { + if !contains_field_named(with_field, fields) { + let cmt_field = self.mc.cat_field( + &*with_expr, + with_cmt.clone(), + with_field.name, + with_field.ty(self.tcx(), substs) + ); + self.delegate_consume(with_expr.id, with_expr.span, cmt_field); + } } } - } else { - // the base expression should always evaluate to a - // struct; however, when EUV is run during typeck, it - // may not. This will generate an error earlier in typeck, - // so we can just ignore it. - if !self.tcx().sess.has_errors() { - span_bug!( - with_expr.span, - "with expression doesn't evaluate to a struct"); + _ => { + // the base expression should always evaluate to a + // struct; however, when EUV is run during typeck, it + // may not. This will generate an error earlier in typeck, + // so we can just ignore it. + if !self.tcx().sess.has_errors() { + span_bug!( + with_expr.span, + "with expression doesn't evaluate to a struct"); + } } - }; + } // walk the with expression so that complex expressions // are properly handled. @@ -761,7 +765,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { }; let bk = ty::BorrowKind::from_mutbl(m); self.delegate.borrow(expr.id, expr.span, cmt, - *r, bk, AutoRef); + r, bk, AutoRef); } } } @@ -822,7 +826,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { self.delegate.borrow(expr.id, expr.span, cmt_base, - *r, + r, ty::BorrowKind::from_mutbl(m), AutoRef); } @@ -835,7 +839,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { // Converting from a &T to *T (or &mut T to *mut T) is // treated as borrowing it for the enclosing temporary // scope. - let r = ty::ReScope(self.tcx().region_maps.node_extent(expr.id)); + let r = self.tcx().node_scope_region(expr.id); self.delegate.borrow(expr.id, expr.span, @@ -890,7 +894,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { // methods are implicitly autoref'd which sadly does not use // adjustments, so we must hardcode the borrow here. - let r = ty::ReScope(self.tcx().region_maps.node_extent(expr.id)); + let r = self.tcx().node_scope_region(expr.id); let bk = ty::ImmBorrow; for &arg in &rhs { @@ -939,9 +943,9 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { pat); return_if_err!(self.mc.cat_pattern(cmt_discr, pat, |_mc, cmt_pat, pat| { match pat.node { - PatKind::Binding(hir::BindByRef(..), _, _) => + PatKind::Binding(hir::BindByRef(..), ..) => mode.lub(BorrowingMatch), - PatKind::Binding(hir::BindByValue(..), _, _) => { + PatKind::Binding(hir::BindByValue(..), ..) => { match copy_or_move(self.mc.infcx, &cmt_pat, PatBindingMove) { Copy => mode.lub(CopyingMatch), Move(..) => mode.lub(MovingMatch), @@ -963,7 +967,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { let infcx = self.mc.infcx; let delegate = &mut self.delegate; return_if_err!(mc.cat_pattern(cmt_discr.clone(), pat, |mc, cmt_pat, pat| { - if let PatKind::Binding(bmode, _, _) = pat.node { + if let PatKind::Binding(bmode, ..) 
= pat.node { debug!("binding cmt_pat={:?} pat={:?} match_mode={:?}", cmt_pat, pat, match_mode); // pat_ty: the type of the binding being produced. @@ -979,7 +983,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { // It is also a borrow or copy/move of the value being matched. match bmode { hir::BindByRef(m) => { - if let ty::TyRef(&r, _) = pat_ty.sty { + if let ty::TyRef(r, _) = pat_ty.sty { let bk = ty::BorrowKind::from_mutbl(m); delegate.borrow(pat.id, pat.span, cmt_pat, r, bk, RefBinding); } @@ -999,7 +1003,8 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { // the leaves of the pattern tree structure. return_if_err!(mc.cat_pattern(cmt_discr, pat, |mc, cmt_pat, pat| { match tcx.expect_def_or_none(pat.id) { - Some(Def::Variant(enum_did, variant_did)) => { + Some(Def::Variant(variant_did)) => { + let enum_did = tcx.parent_def_id(variant_did).unwrap(); let downcast_cmt = if tcx.lookup_adt_def(enum_did).is_univariant() { cmt_pat } else { @@ -1010,7 +1015,8 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { debug!("variant downcast_cmt={:?} pat={:?}", downcast_cmt, pat); delegate.matched_pat(pat, downcast_cmt, match_mode); } - Some(Def::Struct(..)) | Some(Def::TyAlias(..)) | Some(Def::AssociatedTy(..)) => { + Some(Def::Struct(..)) | Some(Def::Union(..)) | + Some(Def::TyAlias(..)) | Some(Def::AssociatedTy(..)) => { debug!("struct cmt_pat={:?} pat={:?}", cmt_pat, pat); delegate.matched_pat(pat, cmt_pat, match_mode); } @@ -1024,7 +1030,8 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { self.tcx().with_freevars(closure_expr.id, |freevars| { for freevar in freevars { - let id_var = freevar.def.var_id(); + let def_id = freevar.def.def_id(); + let id_var = self.tcx().map.as_local_node_id(def_id).unwrap(); let upvar_id = ty::UpvarId { var_id: id_var, closure_expr_id: closure_expr.id }; let upvar_capture = self.mc.infcx.upvar_capture(upvar_id).unwrap(); @@ -1056,7 +1063,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { -> mc::McResult> { // Create the cmt for the variable being borrowed, from the // caller's perspective - let var_id = upvar_def.var_id(); + let var_id = self.tcx().map.as_local_node_id(upvar_def.def_id()).unwrap(); let var_ty = self.mc.infcx.node_ty(var_id)?; self.mc.cat_def(closure_id, closure_span, var_ty, upvar_def) } diff --git a/src/librustc/middle/free_region.rs b/src/librustc/middle/free_region.rs index e4ce897671..bd35bfc982 100644 --- a/src/librustc/middle/free_region.rs +++ b/src/librustc/middle/free_region.rs @@ -37,7 +37,7 @@ impl FreeRegionMap { for implied_bound in implied_bounds { debug!("implied bound: {:?}", implied_bound); match *implied_bound { - ImpliedBound::RegionSubRegion(ty::ReFree(free_a), ty::ReFree(free_b)) => { + ImpliedBound::RegionSubRegion(&ty::ReFree(free_a), &ty::ReFree(free_b)) => { self.relate_free_regions(free_a, free_b); } ImpliedBound::RegionSubRegion(..) | @@ -55,7 +55,6 @@ impl FreeRegionMap { match *predicate { ty::Predicate::Projection(..) | ty::Predicate::Trait(..) | - ty::Predicate::Rfc1592(..) | ty::Predicate::Equate(..) | ty::Predicate::WellFormed(..) | ty::Predicate::ObjectSafe(..) 
| @@ -65,9 +64,9 @@ impl FreeRegionMap { } ty::Predicate::RegionOutlives(ty::Binder(ty::OutlivesPredicate(r_a, r_b))) => { match (r_a, r_b) { - (ty::ReStatic, ty::ReFree(_)) => {}, - (ty::ReFree(fr_a), ty::ReStatic) => self.relate_to_static(fr_a), - (ty::ReFree(fr_a), ty::ReFree(fr_b)) => { + (&ty::ReStatic, &ty::ReFree(_)) => {}, + (&ty::ReFree(fr_a), &ty::ReStatic) => self.relate_to_static(fr_a), + (&ty::ReFree(fr_a), &ty::ReFree(fr_b)) => { // Record that `'a:'b`. Or, put another way, `'b <= 'a`. self.relate_free_regions(fr_b, fr_a); } @@ -122,26 +121,26 @@ impl FreeRegionMap { /// inference* and sadly the logic is somewhat duplicated with the code in infer.rs. pub fn is_subregion_of(&self, tcx: TyCtxt, - sub_region: ty::Region, - super_region: ty::Region) + sub_region: &ty::Region, + super_region: &ty::Region) -> bool { let result = sub_region == super_region || { match (sub_region, super_region) { - (ty::ReEmpty, _) | - (_, ty::ReStatic) => + (&ty::ReEmpty, _) | + (_, &ty::ReStatic) => true, - (ty::ReScope(sub_scope), ty::ReScope(super_scope)) => + (&ty::ReScope(sub_scope), &ty::ReScope(super_scope)) => tcx.region_maps.is_subscope_of(sub_scope, super_scope), - (ty::ReScope(sub_scope), ty::ReFree(fr)) => + (&ty::ReScope(sub_scope), &ty::ReFree(fr)) => tcx.region_maps.is_subscope_of(sub_scope, fr.scope) || self.is_static(fr), - (ty::ReFree(sub_fr), ty::ReFree(super_fr)) => + (&ty::ReFree(sub_fr), &ty::ReFree(super_fr)) => self.sub_free_region(sub_fr, super_fr), - (ty::ReStatic, ty::ReFree(sup_fr)) => + (&ty::ReStatic, &ty::ReFree(sup_fr)) => self.is_static(sup_fr), _ => diff --git a/src/librustc/middle/intrinsicck.rs b/src/librustc/middle/intrinsicck.rs index a1a4f15b9f..1acd0fb0f7 100644 --- a/src/librustc/middle/intrinsicck.rs +++ b/src/librustc/middle/intrinsicck.rs @@ -52,7 +52,7 @@ struct ExprVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { impl<'a, 'gcx, 'tcx> ExprVisitor<'a, 'gcx, 'tcx> { fn def_id_is_transmute(&self, def_id: DefId) -> bool { let intrinsic = match self.infcx.tcx.lookup_item_type(def_id).ty.sty { - ty::TyFnDef(_, _, ref bfty) => bfty.abi == RustIntrinsic, + ty::TyFnDef(.., ref bfty) => bfty.abi == RustIntrinsic, _ => return false }; intrinsic && self.infcx.tcx.item_name(def_id).as_str() == "transmute" @@ -103,11 +103,16 @@ impl<'a, 'gcx, 'tcx> ExprVisitor<'a, 'gcx, 'tcx> { } }; - span_err!(self.infcx.tcx.sess, span, E0512, + struct_span_err!(self.infcx.tcx.sess, span, E0512, "transmute called with differently sized types: \ {} ({}) to {} ({})", from, skeleton_string(from, sk_from), - to, skeleton_string(to, sk_to)); + to, skeleton_string(to, sk_to)) + .span_label(span, + &format!("transmuting between {} and {}", + skeleton_string(from, sk_from), + skeleton_string(to, sk_to))) + .emit(); } } @@ -160,7 +165,7 @@ impl<'a, 'gcx, 'tcx, 'v> Visitor<'v> for ExprVisitor<'a, 'gcx, 'tcx> { Def::Fn(did) if self.def_id_is_transmute(did) => { let typ = self.infcx.tcx.node_id_to_type(expr.id); match typ.sty { - ty::TyFnDef(_, _, ref bare_fn_ty) if bare_fn_ty.abi == RustIntrinsic => { + ty::TyFnDef(.., ref bare_fn_ty) if bare_fn_ty.abi == RustIntrinsic => { let from = bare_fn_ty.sig.0.inputs[0]; let to = bare_fn_ty.sig.0.output; self.check_transmute(expr.span, from, to, expr.id); diff --git a/src/librustc/middle/lang_items.rs b/src/librustc/middle/lang_items.rs index a209b1d1ab..078cce9c49 100644 --- a/src/librustc/middle/lang_items.rs +++ b/src/librustc/middle/lang_items.rs @@ -30,7 +30,6 @@ use middle::weak_lang_items; use util::nodemap::FnvHashMap; use syntax::ast; -use 
syntax::attr::AttrMetaMethods; use syntax::parse::token::InternedString; use hir::intravisit::Visitor; use hir; @@ -44,7 +43,7 @@ macro_rules! language_item_table { enum_from_u32! { - #[derive(Copy, Clone, PartialEq, Eq, Hash)] + #[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub enum LangItem { $($variant,)* } diff --git a/src/librustc/middle/liveness.rs b/src/librustc/middle/liveness.rs index b83826de26..db9dd82d49 100644 --- a/src/librustc/middle/liveness.rs +++ b/src/librustc/middle/liveness.rs @@ -465,7 +465,8 @@ fn visit_expr(ir: &mut IrMaps, expr: &Expr) { let mut call_caps = Vec::new(); ir.tcx.with_freevars(expr.id, |freevars| { for fv in freevars { - if let Def::Local(_, rv) = fv.def { + if let Def::Local(def_id) = fv.def { + let rv = ir.tcx.map.as_local_node_id(def_id).unwrap(); let fv_ln = ir.add_live_node(FreeVarNode(fv.span)); call_caps.push(CaptureInfo {ln: fv_ln, var_nid: rv}); @@ -482,7 +483,7 @@ fn visit_expr(ir: &mut IrMaps, expr: &Expr) { ir.add_live_node_for_node(expr.id, ExprNode(expr.span)); intravisit::walk_expr(ir, expr); } - hir::ExprBinary(op, _, _) if op.node.is_lazy() => { + hir::ExprBinary(op, ..) if op.node.is_lazy() => { ir.add_live_node_for_node(expr.id, ExprNode(expr.span)); intravisit::walk_expr(ir, expr); } @@ -943,7 +944,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { self.propagate_through_expr(&e, succ) } - hir::ExprClosure(_, _, ref blk, _) => { + hir::ExprClosure(.., ref blk, _) => { debug!("{} is an ExprClosure", expr_to_string(expr)); @@ -1123,7 +1124,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { self.propagate_through_expr(&f, succ) } - hir::ExprMethodCall(_, _, ref args) => { + hir::ExprMethodCall(.., ref args) => { let method_call = ty::MethodCall::expr(expr.id); let method_ty = self.ir.tcx.tables.borrow().method_map[&method_call].ty; // FIXME(canndrew): This is_never should really be an is_uninhabited @@ -1270,7 +1271,8 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { fn access_path(&mut self, expr: &Expr, succ: LiveNode, acc: u32) -> LiveNode { match self.ir.tcx.expect_def(expr.id) { - Def::Local(_, nid) => { + Def::Local(def_id) => { + let nid = self.ir.tcx.map.as_local_node_id(def_id).unwrap(); let ln = self.live_node(expr.id, expr.span); if acc != 0 { self.init_from_succ(ln, succ); @@ -1529,11 +1531,12 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { fn check_lvalue(&mut self, expr: &Expr) { match expr.node { hir::ExprPath(..) => { - if let Def::Local(_, nid) = self.ir.tcx.expect_def(expr.id) { + if let Def::Local(def_id) = self.ir.tcx.expect_def(expr.id) { // Assignment to an immutable variable or argument: only legal // if there is no later assignment. If this local is actually // mutable, then check for a reassignment to flag the mutability // as being used. 
+ let nid = self.ir.tcx.map.as_local_node_id(def_id).unwrap(); let ln = self.live_node(expr.id, expr.span); let var = self.variable(nid, expr.span); self.warn_about_dead_assign(expr.span, expr.id, ln, var); diff --git a/src/librustc/middle/mem_categorization.rs b/src/librustc/middle/mem_categorization.rs index 676e456dce..340a5ac8f8 100644 --- a/src/librustc/middle/mem_categorization.rs +++ b/src/librustc/middle/mem_categorization.rs @@ -67,7 +67,6 @@ pub use self::ElementKind::*; pub use self::MutabilityCategory::*; pub use self::AliasableReason::*; pub use self::Note::*; -pub use self::deref_kind::*; use self::Aliasability::*; @@ -90,11 +89,11 @@ use std::rc::Rc; #[derive(Clone, PartialEq)] pub enum Categorization<'tcx> { - Rvalue(ty::Region), // temporary val, argument is its scope + Rvalue(&'tcx ty::Region), // temporary val, argument is its scope StaticItem, Upvar(Upvar), // upvar referenced by closure env Local(ast::NodeId), // local variable - Deref(cmt<'tcx>, usize, PointerKind), // deref of a ptr + Deref(cmt<'tcx>, usize, PointerKind<'tcx>), // deref of a ptr Interior(cmt<'tcx>, InteriorKind), // something interior: field, tuple, etc Downcast(cmt<'tcx>, DefId), // selects a particular enum variant (*1) @@ -110,18 +109,18 @@ pub struct Upvar { // different kinds of pointers: #[derive(Clone, Copy, PartialEq, Eq, Hash)] -pub enum PointerKind { +pub enum PointerKind<'tcx> { /// `Box` Unique, /// `&T` - BorrowedPtr(ty::BorrowKind, ty::Region), + BorrowedPtr(ty::BorrowKind, &'tcx ty::Region), /// `*T` UnsafePtr(hir::Mutability), /// Implicit deref of the `&T` that results from an overloaded index `[]`. - Implicit(ty::BorrowKind, ty::Region), + Implicit(ty::BorrowKind, &'tcx ty::Region), } // We use the term "interior" to mean "something reachable from the @@ -195,52 +194,6 @@ pub struct cmt_<'tcx> { pub type cmt<'tcx> = Rc>; -// We pun on *T to mean both actual deref of a ptr as well -// as accessing of components: -#[derive(Copy, Clone)] -pub enum deref_kind { - deref_ptr(PointerKind), - deref_interior(InteriorKind), -} - -type DerefKindContext = Option; - -// Categorizes a derefable type. Note that we include vectors and strings as -// derefable (we model an index as the combination of a deref and then a -// pointer adjustment). -fn deref_kind(t: Ty, context: DerefKindContext) -> McResult { - match t.sty { - ty::TyBox(_) => { - Ok(deref_ptr(Unique)) - } - - ty::TyRef(r, mt) => { - let kind = ty::BorrowKind::from_mutbl(mt.mutbl); - Ok(deref_ptr(BorrowedPtr(kind, *r))) - } - - ty::TyRawPtr(ref mt) => { - Ok(deref_ptr(UnsafePtr(mt.mutbl))) - } - - ty::TyEnum(..) | - ty::TyStruct(..) => { // newtype - Ok(deref_interior(InteriorField(PositionalField(0)))) - } - - ty::TyArray(_, _) | ty::TySlice(_) => { - // no deref of indexed content without supplying InteriorOffsetKind - if let Some(context) = context { - Ok(deref_interior(InteriorElement(context, ElementKind::VecElement))) - } else { - Err(()) - } - } - - _ => Err(()), - } -} - pub trait ast_node { fn id(&self) -> ast::NodeId; fn span(&self) -> Span; @@ -318,7 +271,7 @@ impl MutabilityCategory { fn from_local(tcx: TyCtxt, id: ast::NodeId) -> MutabilityCategory { let ret = match tcx.map.get(id) { ast_map::NodeLocal(p) => match p.node { - PatKind::Binding(bind_mode, _, _) => { + PatKind::Binding(bind_mode, ..) => { if bind_mode == hir::BindByValue(hir::MutMutable) { McDeclared } else { @@ -419,7 +372,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { // *being borrowed* is. 
But ideally we would put in a more // fundamental fix to this conflated use of the node id. let ret_ty = match pat.node { - PatKind::Binding(hir::BindByRef(_), _, _) => { + PatKind::Binding(hir::BindByRef(_), ..) => { // a bind-by-ref means that the base_ty will be the type of the ident itself, // but what we want here is the type of the underlying value being borrowed. // So peel off one-level, turning the &T into T. @@ -477,7 +430,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { autoderefs, cmt); for deref in 1..autoderefs + 1 { - cmt = self.cat_deref(expr, cmt, deref, None)?; + cmt = self.cat_deref(expr, cmt, deref)?; } return Ok(cmt); } @@ -489,7 +442,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { match expr.node { hir::ExprUnary(hir::UnDeref, ref e_base) => { let base_cmt = self.cat_expr(&e_base)?; - self.cat_deref(expr, base_cmt, 0, None) + self.cat_deref(expr, base_cmt, 0) } hir::ExprField(ref base, f_name) => { @@ -508,7 +461,6 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { hir::ExprIndex(ref base, _) => { let method_call = ty::MethodCall::expr(expr.id()); - let context = InteriorOffsetKind::Index; match self.infcx.node_method_ty(method_call) { Some(method_ty) => { // If this is an index implemented by a method call, then it @@ -530,10 +482,10 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { // is an rvalue. That is what we will be // dereferencing. let base_cmt = self.cat_rvalue_node(expr.id(), expr.span(), ret_ty); - self.cat_deref_common(expr, base_cmt, 1, elem_ty, Some(context), true) + Ok(self.cat_deref_common(expr, base_cmt, 1, elem_ty, true)) } None => { - self.cat_index(expr, self.cat_expr(&base)?, context) + self.cat_index(expr, self.cat_expr(&base)?, InteriorOffsetKind::Index) } } } @@ -572,12 +524,12 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { id, expr_ty, def); match def { - Def::Struct(..) | Def::Variant(..) | Def::Const(..) | + Def::Struct(..) | Def::Union(..) | Def::Variant(..) | Def::Const(..) | Def::AssociatedConst(..) | Def::Fn(..) | Def::Method(..) => { Ok(self.cat_rvalue_node(id, span, expr_ty)) } - Def::Mod(_) | Def::ForeignMod(_) | + Def::Mod(_) | Def::Trait(_) | Def::Enum(..) | Def::TyAlias(..) | Def::PrimTy(_) | Def::TyParam(..) | Def::Label(_) | Def::SelfTy(..) | @@ -597,7 +549,8 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { })) } - Def::Upvar(_, var_id, _, fn_node_id) => { + Def::Upvar(def_id, _, fn_node_id) => { + let var_id = self.tcx().map.as_local_node_id(def_id).unwrap(); let ty = self.node_ty(fn_node_id)?; match ty.sty { ty::TyClosure(closure_id, _) => { @@ -633,7 +586,8 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { } } - Def::Local(_, vid) => { + Def::Local(def_id) => { + let vid = self.tcx().map.as_local_node_id(def_id).unwrap(); Ok(Rc::new(cmt_ { id: id, span: span, @@ -761,19 +715,19 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { }; match fn_expr.node { - hir::ExprClosure(_, _, ref body, _) => body.id, + hir::ExprClosure(.., ref body, _) => body.id, _ => bug!() } }; // Region of environment pointer - let env_region = ty::ReFree(ty::FreeRegion { + let env_region = self.tcx().mk_region(ty::ReFree(ty::FreeRegion { // The environment of a closure is guaranteed to // outlive any bindings introduced in the body of the // closure itself. 
scope: self.tcx().region_maps.item_extent(fn_body_id), bound_region: ty::BrEnv - }); + })); let env_ptr = BorrowedPtr(env_borrow_kind, env_region); @@ -817,11 +771,11 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { /// Returns the lifetime of a temporary created by expr with id `id`. /// This could be `'static` if `id` is part of a constant expression. - pub fn temporary_scope(&self, id: ast::NodeId) -> ty::Region { - match self.infcx.temporary_scope(id) { + pub fn temporary_scope(&self, id: ast::NodeId) -> &'tcx ty::Region { + self.tcx().mk_region(match self.infcx.temporary_scope(id) { Some(scope) => ty::ReScope(scope), None => ty::ReStatic - } + }) } pub fn cat_rvalue_node(&self, @@ -845,7 +799,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { let re = if qualif.intersects(ConstQualif::NON_STATIC_BORROWS) { self.temporary_scope(id) } else { - ty::ReStatic + self.tcx().mk_region(ty::ReStatic) }; let ret = self.cat_rvalue(id, span, re, expr_ty); debug!("cat_rvalue_node ret {:?}", ret); @@ -855,7 +809,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { pub fn cat_rvalue(&self, cmt_id: ast::NodeId, span: Span, - temp_scope: ty::Region, + temp_scope: &'tcx ty::Region, expr_ty: Ty<'tcx>) -> cmt<'tcx> { let ret = Rc::new(cmt_ { id:cmt_id, @@ -908,8 +862,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { fn cat_deref(&self, node: &N, base_cmt: cmt<'tcx>, - deref_cnt: usize, - deref_context: DerefKindContext) + deref_cnt: usize) -> McResult> { let method_call = ty::MethodCall { expr_id: node.id(), @@ -931,12 +884,9 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { let base_cmt_ty = base_cmt.ty; match base_cmt_ty.builtin_deref(true, ty::NoPreference) { Some(mt) => { - let ret = self.cat_deref_common(node, base_cmt, deref_cnt, - mt.ty, - deref_context, - /* implicit: */ false); + let ret = self.cat_deref_common(node, base_cmt, deref_cnt, mt.ty, false); debug!("cat_deref ret {:?}", ret); - ret + Ok(ret) } None => { debug!("Explicit deref of non-derefable type: {:?}", @@ -951,40 +901,29 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { base_cmt: cmt<'tcx>, deref_cnt: usize, deref_ty: Ty<'tcx>, - deref_context: DerefKindContext, implicit: bool) - -> McResult> + -> cmt<'tcx> { - let (m, cat) = match deref_kind(base_cmt.ty, deref_context)? { - deref_ptr(ptr) => { - let ptr = if implicit { - match ptr { - BorrowedPtr(bk, r) => Implicit(bk, r), - _ => span_bug!(node.span(), - "Implicit deref of non-borrowed pointer") - } - } else { - ptr - }; - // for unique ptrs, we inherit mutability from the - // owning reference. - (MutabilityCategory::from_pointer_kind(base_cmt.mutbl, ptr), - Categorization::Deref(base_cmt, deref_cnt, ptr)) - } - deref_interior(interior) => { - (base_cmt.mutbl.inherit(), Categorization::Interior(base_cmt, interior)) + let ptr = match base_cmt.ty.sty { + ty::TyBox(..) => Unique, + ty::TyRawPtr(ref mt) => UnsafePtr(mt.mutbl), + ty::TyRef(r, mt) => { + let bk = ty::BorrowKind::from_mutbl(mt.mutbl); + if implicit { Implicit(bk, r) } else { BorrowedPtr(bk, r) } } + ref ty => bug!("unexpected type in cat_deref_common: {:?}", ty) }; let ret = Rc::new(cmt_ { id: node.id(), span: node.span(), - cat: cat, - mutbl: m, + // For unique ptrs, we inherit mutability from the owning reference. 
+ mutbl: MutabilityCategory::from_pointer_kind(base_cmt.mutbl, ptr), + cat: Categorization::Deref(base_cmt, deref_cnt, ptr), ty: deref_ty, note: NoteNone }); debug!("cat_deref_common ret {:?}", ret); - Ok(ret) + ret } pub fn cat_index(&self, @@ -1138,23 +1077,28 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { // alone) because PatKind::Struct can also refer to variants. let cmt = match self.tcx().expect_def_or_none(pat.id) { Some(Def::Err) => return Err(()), - Some(Def::Variant(enum_did, variant_did)) + Some(Def::Variant(variant_did)) => { // univariant enums do not need downcasts - if !self.tcx().lookup_adt_def(enum_did).is_univariant() => { + let enum_did = self.tcx().parent_def_id(variant_did).unwrap(); + if !self.tcx().lookup_adt_def(enum_did).is_univariant() { self.cat_downcast(pat, cmt.clone(), cmt.ty, variant_did) + } else { + cmt } + } _ => cmt }; match pat.node { PatKind::TupleStruct(_, ref subpats, ddpos) => { let expected_len = match self.tcx().expect_def(pat.id) { - Def::Variant(enum_def, def_id) => { + Def::Variant(def_id) => { + let enum_def = self.tcx().parent_def_id(def_id).unwrap(); self.tcx().lookup_adt_def(enum_def).variant_with_id(def_id).fields.len() } Def::Struct(..) => { match self.pat_ty(&pat)?.sty { - ty::TyStruct(adt_def, _) => { + ty::TyAdt(adt_def, _) => { adt_def.struct_variant().fields.len() } ref ty => { @@ -1185,7 +1129,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { } } - PatKind::Binding(_, _, Some(ref subpat)) => { + PatKind::Binding(.., Some(ref subpat)) => { self.cat_pattern_(cmt, &subpat, op)?; } @@ -1207,7 +1151,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { // box p1, &p1, &mut p1. we can ignore the mutability of // PatKind::Ref since that information is already contained // in the type. - let subcmt = self.cat_deref(pat, cmt, 0, None)?; + let subcmt = self.cat_deref(pat, cmt, 0)?; self.cat_pattern_(subcmt, &subpat, op)?; } @@ -1225,7 +1169,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { } } - PatKind::Path(..) | PatKind::Binding(_, _, None) | + PatKind::Path(..) | PatKind::Binding(.., None) | PatKind::Lit(..) | PatKind::Range(..) | PatKind::Wild => { // always ok } @@ -1275,9 +1219,9 @@ impl<'tcx> cmt_<'tcx> { Categorization::Rvalue(..) | Categorization::StaticItem | Categorization::Local(..) | - Categorization::Deref(_, _, UnsafePtr(..)) | - Categorization::Deref(_, _, BorrowedPtr(..)) | - Categorization::Deref(_, _, Implicit(..)) | + Categorization::Deref(.., UnsafePtr(..)) | + Categorization::Deref(.., BorrowedPtr(..)) | + Categorization::Deref(.., Implicit(..)) | Categorization::Upvar(..) => { Rc::new((*self).clone()) } @@ -1320,7 +1264,7 @@ impl<'tcx> cmt_<'tcx> { Categorization::Rvalue(..) | Categorization::Local(..) | Categorization::Upvar(..) | - Categorization::Deref(_, _, UnsafePtr(..)) => { // yes, it's aliasable, but... + Categorization::Deref(.., UnsafePtr(..)) => { // yes, it's aliasable, but... NonAliasable } @@ -1349,9 +1293,9 @@ impl<'tcx> cmt_<'tcx> { match self.note { NoteClosureEnv(..) | NoteUpvarRef(..) => { Some(match self.cat { - Categorization::Deref(ref inner, _, _) => { + Categorization::Deref(ref inner, ..) => { match inner.cat { - Categorization::Deref(ref inner, _, _) => inner.clone(), + Categorization::Deref(ref inner, ..) => inner.clone(), Categorization::Upvar(..) 
=> inner.clone(), _ => bug!() } @@ -1379,7 +1323,7 @@ impl<'tcx> cmt_<'tcx> { "local variable".to_string() } } - Categorization::Deref(_, _, pk) => { + Categorization::Deref(.., pk) => { let upvar = self.upvar(); match upvar.as_ref().map(|i| &i.cat) { Some(&Categorization::Upvar(ref var)) => { @@ -1480,7 +1424,7 @@ pub fn ptr_sigil(ptr: PointerKind) -> &'static str { } } -impl fmt::Debug for PointerKind { +impl<'tcx> fmt::Debug for PointerKind<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Unique => write!(f, "Box"), diff --git a/src/librustc/middle/privacy.rs b/src/librustc/middle/privacy.rs index 478f662d09..189150d426 100644 --- a/src/librustc/middle/privacy.rs +++ b/src/librustc/middle/privacy.rs @@ -23,9 +23,8 @@ use syntax::ast::NodeId; pub enum AccessLevel { // Exported items + items participating in various kinds of public interfaces, // but not directly nameable. For example, if function `fn f() -> T {...}` is - // public, then type `T` is exported. Its values can be obtained by other crates - // even if the type itseld is not nameable. - // FIXME: Mostly unimplemented. Only `type` aliases export items currently. + // public, then type `T` is reachable. Its values can be obtained by other crates + // even if the type itself is not nameable. Reachable, // Public items + items accessible to other crates with help of `pub use` reexports Exported, diff --git a/src/librustc/middle/reachable.rs b/src/librustc/middle/reachable.rs index 6ea0fa20c5..beffaff1e5 100644 --- a/src/librustc/middle/reachable.rs +++ b/src/librustc/middle/reachable.rs @@ -22,9 +22,8 @@ use hir::def_id::DefId; use ty::{self, TyCtxt}; use middle::privacy; use session::config; -use util::nodemap::NodeSet; +use util::nodemap::{NodeSet, FnvHashSet}; -use std::collections::HashSet; use syntax::abi::Abi; use syntax::ast; use syntax::attr; @@ -47,8 +46,8 @@ fn item_might_be_inlined(item: &hir::Item) -> bool { } match item.node { - hir::ItemImpl(_, _, ref generics, _, _, _) | - hir::ItemFn(_, _, _, _, ref generics, _) => { + hir::ItemImpl(_, _, ref generics, ..) | + hir::ItemFn(.., ref generics, _) => { generics_require_inlining(generics) } _ => false, @@ -139,7 +138,8 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // Creates a new reachability computation context. fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> ReachableContext<'a, 'tcx> { let any_library = tcx.sess.crate_types.borrow().iter().any(|ty| { - *ty == config::CrateTypeRlib || *ty == config::CrateTypeDylib + *ty == config::CrateTypeRlib || *ty == config::CrateTypeDylib || + *ty == config::CrateTypeRustcMacro }); ReachableContext { tcx: tcx, @@ -187,7 +187,7 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // does too. let impl_node_id = self.tcx.map.as_local_node_id(impl_did).unwrap(); match self.tcx.map.expect_item(impl_node_id).node { - hir::ItemImpl(_, _, ref generics, _, _, _) => { + hir::ItemImpl(_, _, ref generics, ..) => { generics_require_inlining(generics) } _ => false @@ -204,7 +204,7 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // Step 2: Mark all symbols that the symbols on the worklist touch. fn propagate(&mut self) { - let mut scanned = HashSet::new(); + let mut scanned = FnvHashSet(); loop { let search_item = match self.worklist.pop() { Some(item) => item, @@ -226,7 +226,7 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // If we are building an executable, only explicitly extern // types need to be exported. 
if let ast_map::NodeItem(item) = *node { - let reachable = if let hir::ItemFn(_, _, _, abi, _, _) = item.node { + let reachable = if let hir::ItemFn(.., abi, _, _) = item.node { abi != Abi::Rust } else { false @@ -248,7 +248,7 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { match *node { ast_map::NodeItem(item) => { match item.node { - hir::ItemFn(_, _, _, _, _, ref search_block) => { + hir::ItemFn(.., ref search_block) => { if item_might_be_inlined(&item) { intravisit::walk_block(self, &search_block) } @@ -265,11 +265,11 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // inherently and their children are already in the // worklist, as determined by the privacy pass hir::ItemExternCrate(_) | hir::ItemUse(_) | - hir::ItemTy(..) | hir::ItemStatic(_, _, _) | + hir::ItemTy(..) | hir::ItemStatic(..) | hir::ItemMod(..) | hir::ItemForeignMod(..) | hir::ItemImpl(..) | hir::ItemTrait(..) | hir::ItemStruct(..) | hir::ItemEnum(..) | - hir::ItemDefaultImpl(..) => {} + hir::ItemUnion(..) | hir::ItemDefaultImpl(..) => {} } } ast_map::NodeTraitItem(trait_method) => { @@ -329,7 +329,7 @@ struct CollectPrivateImplItemsVisitor<'a> { impl<'a, 'v> Visitor<'v> for CollectPrivateImplItemsVisitor<'a> { fn visit_item(&mut self, item: &hir::Item) { // We need only trait impls here, not inherent impls, and only non-exported ones - if let hir::ItemImpl(_, _, _, Some(_), _, ref impl_items) = item.node { + if let hir::ItemImpl(.., Some(_), _, ref impl_items) = item.node { if !self.access_levels.is_reachable(item.id) { for impl_item in impl_items { self.worklist.push(impl_item.id); diff --git a/src/librustc/middle/recursion_limit.rs b/src/librustc/middle/recursion_limit.rs index 7dcd358165..0764e817f4 100644 --- a/src/librustc/middle/recursion_limit.rs +++ b/src/librustc/middle/recursion_limit.rs @@ -17,7 +17,6 @@ use session::Session; use syntax::ast; -use syntax::attr::AttrMetaMethods; pub fn update_recursion_limit(sess: &Session, krate: &ast::Crate) { for attr in &krate.attrs { diff --git a/src/librustc/middle/region.rs b/src/librustc/middle/region.rs index 6f0ad087dc..33110c61e8 100644 --- a/src/librustc/middle/region.rs +++ b/src/librustc/middle/region.rs @@ -20,7 +20,6 @@ use dep_graph::DepNode; use hir::map as ast_map; use session::Session; use util::nodemap::{FnvHashMap, NodeMap, NodeSet}; -use middle::cstore::InlinedItem; use ty; use std::cell::RefCell; @@ -237,7 +236,7 @@ impl CodeExtent { // (This is the special case aluded to in the // doc-comment for this method) let stmt_span = blk.stmts[r.first_statement_index as usize].span; - Some(Span { lo: stmt_span.hi, ..blk.span }) + Some(Span { lo: stmt_span.hi, hi: blk.span.hi, expn_id: stmt_span.expn_id }) } } } @@ -803,7 +802,8 @@ fn resolve_expr(visitor: &mut RegionResolutionVisitor, expr: &hir::Expr) { terminating(r.id); } - hir::ExprIf(_, ref then, Some(ref otherwise)) => { + hir::ExprIf(ref expr, ref then, Some(ref otherwise)) => { + terminating(expr.id); terminating(then.id); terminating(otherwise.id); } @@ -955,7 +955,7 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor, local: &hir::Local) { /// | box P& fn is_binding_pat(pat: &hir::Pat) -> bool { match pat.node { - PatKind::Binding(hir::BindByRef(_), _, _) => true, + PatKind::Binding(hir::BindByRef(_), ..) 
=> true, PatKind::Struct(_, ref field_pats, _) => { field_pats.iter().any(|fp| is_binding_pat(&fp.node.pat)) @@ -1255,19 +1255,3 @@ pub fn resolve_crate(sess: &Session, map: &ast_map::Map) -> RegionMaps { } return maps; } - -pub fn resolve_inlined_item(sess: &Session, - region_maps: &RegionMaps, - item: &InlinedItem) { - let mut visitor = RegionResolutionVisitor { - sess: sess, - region_maps: region_maps, - cx: Context { - root_id: None, - parent: ROOT_CODE_EXTENT, - var_parent: ROOT_CODE_EXTENT - }, - terminating_scopes: NodeSet() - }; - item.visit(&mut visitor); -} diff --git a/src/librustc/middle/resolve_lifetime.rs b/src/librustc/middle/resolve_lifetime.rs index 9ccca9e6a0..2d93c33afb 100644 --- a/src/librustc/middle/resolve_lifetime.rs +++ b/src/librustc/middle/resolve_lifetime.rs @@ -24,9 +24,7 @@ use session::Session; use hir::def::{Def, DefMap}; use hir::def_id::DefId; use middle::region; -use ty::subst; use ty; -use std::fmt; use std::mem::replace; use syntax::ast; use syntax::parse::token::keywords; @@ -41,8 +39,7 @@ use hir::intravisit::{self, Visitor, FnKind}; #[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)] pub enum DefRegion { DefStaticRegion, - DefEarlyBoundRegion(/* space */ subst::ParamSpace, - /* index */ u32, + DefEarlyBoundRegion(/* index */ u32, /* lifetime decl */ ast::NodeId), DefLateBoundRegion(ty::DebruijnIndex, /* lifetime decl */ ast::NodeId), @@ -90,10 +87,14 @@ struct LifetimeContext<'a, 'tcx: 'a> { labels_in_fn: Vec<(ast::Name, Span)>, } +#[derive(PartialEq, Debug)] enum ScopeChain<'a> { - /// EarlyScope(i, ['a, 'b, ...], s) extends s with early-bound - /// lifetimes, assigning indexes 'a => i, 'b => i+1, ... etc. - EarlyScope(subst::ParamSpace, &'a [hir::LifetimeDef], Scope<'a>), + /// EarlyScope(['a, 'b, ...], start, s) extends s with early-bound + /// lifetimes, with consecutive parameter indices from `start`. + /// That is, 'a has index `start`, 'b has index `start + 1`, etc. + /// Indices before `start` correspond to other generic parameters + /// of a parent item (trait/impl of a method), or `Self` in traits. + EarlyScope(&'a [hir::LifetimeDef], u32, Scope<'a>), /// LateScope(['a, 'b, ...], s) extends s with late-bound /// lifetimes introduced by the declaration binder_id. LateScope(&'a [hir::LifetimeDef], Scope<'a>), @@ -155,12 +156,17 @@ impl<'a, 'tcx, 'v> Visitor<'v> for LifetimeContext<'a, 'tcx> { hir::ItemTy(_, ref generics) | hir::ItemEnum(_, ref generics) | hir::ItemStruct(_, ref generics) | - hir::ItemTrait(_, ref generics, _, _) | - hir::ItemImpl(_, _, ref generics, _, _, _) => { + hir::ItemUnion(_, ref generics) | + hir::ItemTrait(_, ref generics, ..) | + hir::ItemImpl(_, _, ref generics, ..) => { // These kinds of items have only early bound lifetime parameters. let lifetimes = &generics.lifetimes; - let early_scope = EarlyScope(subst::TypeSpace, lifetimes, &ROOT_SCOPE); - this.with(early_scope, |old_scope, this| { + let start = if let hir::ItemTrait(..) 
= item.node { + 1 // Self comes before lifetimes + } else { + 0 + }; + this.with(EarlyScope(lifetimes, start, &ROOT_SCOPE), |old_scope, this| { this.check_lifetime_defs(old_scope, lifetimes); intravisit::walk_item(this, item); }); @@ -181,11 +187,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for LifetimeContext<'a, 'tcx> { self.with(RootScope, |_, this| { match item.node { hir::ForeignItemFn(ref decl, ref generics) => { - this.visit_early_late(item.id, - subst::FnSpace, - decl, - generics, - |this| { + this.visit_early_late(item.id, decl, generics, |this| { intravisit::walk_foreign_item(this, item); }) } @@ -202,15 +204,14 @@ impl<'a, 'tcx, 'v> Visitor<'v> for LifetimeContext<'a, 'tcx> { fn visit_fn(&mut self, fk: FnKind<'v>, decl: &'v hir::FnDecl, b: &'v hir::Block, s: Span, fn_id: ast::NodeId) { match fk { - FnKind::ItemFn(_, generics, _, _, _, _, _) => { - self.visit_early_late(fn_id, subst::FnSpace, decl, generics, |this| { + FnKind::ItemFn(_, generics, ..) => { + self.visit_early_late(fn_id,decl, generics, |this| { this.add_scope_and_walk_fn(fk, decl, b, s, fn_id) }) } - FnKind::Method(_, sig, _, _) => { + FnKind::Method(_, sig, ..) => { self.visit_early_late( fn_id, - subst::FnSpace, decl, &sig.generics, |this| this.add_scope_and_walk_fn(fk, decl, b, s, fn_id)); @@ -263,7 +264,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for LifetimeContext<'a, 'tcx> { if let hir::MethodTraitItem(ref sig, None) = trait_item.node { self.visit_early_late( - trait_item.id, subst::FnSpace, + trait_item.id, &sig.decl, &sig.generics, |this| intravisit::walk_trait_item(this, trait_item)) } else { @@ -336,7 +337,6 @@ impl<'a, 'tcx, 'v> Visitor<'v> for LifetimeContext<'a, 'tcx> { if !self.trait_ref_hack || !trait_ref.bound_lifetimes.is_empty() { if self.trait_ref_hack { - println!("{:?}", trait_ref.span); span_err!(self.sess, trait_ref.span, E0316, "nested quantification of lifetimes"); } @@ -395,9 +395,9 @@ fn signal_shadowing_problem(sess: &Session, name: ast::Name, orig: Original, sha {} name that is already in scope", shadower.kind.desc(), name, orig.kind.desc())) }; - err.span_note(orig.span, - &format!("shadowed {} `{}` declared here", - orig.kind.desc(), name)); + err.span_label(orig.span, &"first declared here"); + err.span_label(shadower.span, + &format!("lifetime {} already in scope", name)); err.emit(); } @@ -454,7 +454,7 @@ fn extract_labels(ctxt: &mut LifetimeContext, b: &hir::Block) { fn expression_label(ex: &hir::Expr) -> Option<(ast::Name, Span)> { match ex.node { - hir::ExprWhile(_, _, Some(label)) | + hir::ExprWhile(.., Some(label)) | hir::ExprLoop(_, Some(label)) => Some((label.node, label.span)), _ => None, } @@ -469,7 +469,7 @@ fn extract_labels(ctxt: &mut LifetimeContext, b: &hir::Block) { FnScope { s, .. } => { scope = s; } RootScope => { return; } - EarlyScope(_, lifetimes, s) | + EarlyScope(lifetimes, _, s) | LateScope(lifetimes, s) => { for lifetime_def in lifetimes { // FIXME (#24278): non-hygienic comparison @@ -498,11 +498,11 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { fn_id: ast::NodeId) { match fk { - FnKind::ItemFn(_, generics, _, _, _, _, _) => { + FnKind::ItemFn(_, generics, ..) => { intravisit::walk_fn_decl(self, fd); self.visit_generics(generics); } - FnKind::Method(_, sig, _, _) => { + FnKind::Method(_, sig, ..) => { intravisit::walk_fn_decl(self, fd); self.visit_generics(&sig.generics); } @@ -557,7 +557,6 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { /// ordering is not important there. 
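[Editor's note, not part of the upstream patch] The `start` value threaded through `EarlyScope` above is simply an offset past the parent item's generic parameters, with traits contributing one extra slot for `Self`. A rough standalone sketch of that bookkeeping, using a hypothetical summary struct rather than the rustc data structures:

```rust
// Hypothetical summary of a parent item's generics; not a rustc type.
struct ParentGenerics {
    is_trait: bool,
    lifetimes: usize,
    ty_params: usize,
}

/// First early-bound index available to a nested item such as a method:
/// `Self` (for traits) and the parent's own generics occupy the slots before it.
fn early_scope_start(parent: Option<&ParentGenerics>) -> u32 {
    match parent {
        None => 0,
        Some(p) => {
            let self_slot = if p.is_trait { 1 } else { 0 };
            (self_slot + p.lifetimes + p.ty_params) as u32
        }
    }
}

fn main() {
    // A method inside `trait Foo<'a, T>`: Self, 'a and T take indices 0..3,
    // so the method's own early-bound lifetimes start at 3.
    let parent = ParentGenerics { is_trait: true, lifetimes: 1, ty_params: 1 };
    assert_eq!(early_scope_start(Some(&parent)), 3);
    assert_eq!(early_scope_start(None), 0);
}
```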
fn visit_early_late(&mut self, fn_id: ast::NodeId, - early_space: subst::ParamSpace, decl: &hir::FnDecl, generics: &hir::Generics, walk: F) where @@ -575,8 +574,24 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { .cloned() .partition(|l| self.map.late_bound.contains_key(&l.lifetime.id)); + // Find the start of nested early scopes, e.g. in methods. + let mut start = 0; + if let EarlyScope(..) = *self.scope { + let parent = self.hir_map.expect_item(self.hir_map.get_parent(fn_id)); + if let hir::ItemTrait(..) = parent.node { + start += 1; // Self comes first. + } + match parent.node { + hir::ItemTrait(_, ref generics, ..) | + hir::ItemImpl(_, _, ref generics, ..) => { + start += generics.lifetimes.len() + generics.ty_params.len(); + } + _ => {} + } + } + let this = self; - this.with(EarlyScope(early_space, &early, this.scope), move |old_scope, this| { + this.with(EarlyScope(&early, start as u32, this.scope), move |old_scope, this| { this.with(LateScope(&late, this.scope), move |_, this| { this.check_lifetime_defs(old_scope, &generics.lifetimes); walk(this); @@ -606,11 +621,11 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { break; } - EarlyScope(space, lifetimes, s) => { + EarlyScope(lifetimes, start, s) => { match search_lifetimes(lifetimes, lifetime_ref) { Some((index, lifetime_def)) => { let decl_id = lifetime_def.id; - let def = DefEarlyBoundRegion(space, index, decl_id); + let def = DefEarlyBoundRegion(start + index, decl_id); self.insert_lifetime(lifetime_ref, def); return; } @@ -672,7 +687,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { break; } - EarlyScope(_, lifetimes, s) | + EarlyScope(lifetimes, _, s) | LateScope(lifetimes, s) => { search_result = search_lifetimes(lifetimes, lifetime_ref); if search_result.is_some() { @@ -768,7 +783,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { return; } - EarlyScope(_, lifetimes, s) | + EarlyScope(lifetimes, _, s) | LateScope(lifetimes, s) => { if let Some((_, lifetime_def)) = search_lifetimes(lifetimes, lifetime) { signal_shadowing_problem( @@ -963,14 +978,3 @@ fn insert_late_bound_lifetimes(map: &mut NamedRegionMap, } } } - -impl<'a> fmt::Debug for ScopeChain<'a> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - match *self { - EarlyScope(space, defs, _) => write!(fmt, "EarlyScope({:?}, {:?})", space, defs), - LateScope(defs, _) => write!(fmt, "LateScope({:?})", defs), - FnScope { fn_id, body_id, s: _ } => write!(fmt, "FnScope({:?}, {:?})", fn_id, body_id), - RootScope => write!(fmt, "RootScope"), - } - } -} diff --git a/src/librustc/middle/stability.rs b/src/librustc/middle/stability.rs index cbbc2c4f98..ccab427923 100644 --- a/src/librustc/middle/stability.rs +++ b/src/librustc/middle/stability.rs @@ -17,17 +17,16 @@ use dep_graph::DepNode; use hir::map as hir_map; use session::Session; use lint; -use middle::cstore::LOCAL_CRATE; use hir::def::Def; -use hir::def_id::{CRATE_DEF_INDEX, DefId, DefIndex}; -use ty::{self, TyCtxt}; +use hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId, DefIndex, LOCAL_CRATE}; +use ty::{self, TyCtxt, AdtKind}; use middle::privacy::AccessLevels; use syntax::parse::token::InternedString; use syntax_pos::{Span, DUMMY_SP}; use syntax::ast; use syntax::ast::{NodeId, Attribute}; use syntax::feature_gate::{GateIssue, emit_feature_err, find_lang_feature_accepted_version}; -use syntax::attr::{self, Stability, Deprecation, AttrMetaMethods}; +use syntax::attr::{self, Stability, Deprecation}; use util::nodemap::{DefIdMap, FnvHashSet, FnvHashMap}; use hir; @@ -103,7 +102,7 @@ pub struct Index<'tcx> { depr_map: DefIdMap>, 
/// Maps for each crate whether it is part of the staged API. - staged_api: FnvHashMap + staged_api: FnvHashMap } // A private tree-walker for producing an Index. @@ -252,11 +251,11 @@ impl<'a, 'tcx, 'v> Visitor<'v> for Annotator<'a, 'tcx> { // they don't have their own stability. They still can be annotated as unstable // and propagate this unstability to children, but this annotation is completely // optional. They inherit stability from their parents when unannotated. - hir::ItemImpl(_, _, _, None, _, _) | hir::ItemForeignMod(..) => { + hir::ItemImpl(.., None, _, _) | hir::ItemForeignMod(..) => { self.in_trait_impl = false; kind = AnnotationKind::Container; } - hir::ItemImpl(_, _, _, Some(_), _, _) => { + hir::ItemImpl(.., Some(_), _, _) => { self.in_trait_impl = true; } hir::ItemStruct(ref sd, _) => { @@ -412,8 +411,8 @@ impl<'a, 'tcx> Checker<'a, 'tcx> { &feature, &r), None => format!("use of unstable library feature '{}'", &feature) }; - emit_feature_err(&self.tcx.sess.parse_sess.span_diagnostic, - &feature, span, GateIssue::Library(Some(issue)), &msg); + emit_feature_err(&self.tcx.sess.parse_sess, &feature, span, + GateIssue::Library(Some(issue)), &msg); } } Some(&Stability { ref level, ref feature, .. }) => { @@ -528,7 +527,7 @@ pub fn check_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // For implementations of traits, check the stability of each item // individually as it's possible to have a stable trait with unstable // items. - hir::ItemImpl(_, _, _, Some(ref t), _, ref impl_items) => { + hir::ItemImpl(.., Some(ref t), _, ref impl_items) => { let trait_did = tcx.expect_def(t.ref_id).def_id(); let trait_items = tcx.trait_items(trait_did); @@ -553,7 +552,7 @@ pub fn check_expr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, e: &hir::Expr, &Option)) { let span; let id = match e.node { - hir::ExprMethodCall(i, _, _) => { + hir::ExprMethodCall(i, ..) => { span = i.span; let method_call = ty::MethodCall::expr(e.id); tcx.tables.borrow().method_map[&method_call].def_id @@ -561,15 +560,19 @@ pub fn check_expr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, e: &hir::Expr, hir::ExprField(ref base_e, ref field) => { span = field.span; match tcx.expr_ty_adjusted(base_e).sty { - ty::TyStruct(def, _) => def.struct_variant().field_named(field.node).did, + ty::TyAdt(def, _) => { + def.struct_variant().field_named(field.node).did + } _ => span_bug!(e.span, - "stability::check_expr: named field access on non-struct") + "stability::check_expr: named field access on non-ADT") } } hir::ExprTupField(ref base_e, ref field) => { span = field.span; match tcx.expr_ty_adjusted(base_e).sty { - ty::TyStruct(def, _) => def.struct_variant().fields[field.node].did, + ty::TyAdt(def, _) => { + def.struct_variant().fields[field.node].did + } ty::TyTuple(..) => return, _ => span_bug!(e.span, "stability::check_expr: unnamed field access on \ @@ -577,31 +580,28 @@ pub fn check_expr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, e: &hir::Expr, } } hir::ExprStruct(_, ref expr_fields, _) => { - let type_ = tcx.expr_ty(e); - match type_.sty { - ty::TyStruct(def, _) => { - // check the stability of each field that appears - // in the construction expression. - for field in expr_fields { - let did = def.struct_variant() - .field_named(field.name.node) - .did; - maybe_do_stability_check(tcx, did, field.span, cb); - } + match tcx.expr_ty(e).sty { + ty::TyAdt(adt, ..) => match adt.adt_kind() { + AdtKind::Struct | AdtKind::Union => { + // check the stability of each field that appears + // in the construction expression. 
+ for field in expr_fields { + let did = adt.struct_variant().field_named(field.name.node).did; + maybe_do_stability_check(tcx, did, field.span, cb); + } - // we're done. - return - } - // we don't look at stability attributes on - // struct-like enums (yet...), but it's definitely not - // a bug to have construct one. - ty::TyEnum(..) => return, - _ => { - span_bug!(e.span, - "stability::check_expr: struct construction \ - of non-struct, type {:?}", - type_); - } + // we're done. + return + } + AdtKind::Enum => { + // we don't look at stability attributes on + // struct-like enums (yet...), but it's definitely not + // a bug to have construct one. + return + } + }, + ref ty => span_bug!(e.span, "stability::check_expr: struct \ + construction of non-ADT type: {:?}", ty) } } _ => return @@ -631,7 +631,7 @@ pub fn check_path_list_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option)) { - match tcx.expect_def(item.node.id()) { + match tcx.expect_def(item.node.id) { Def::PrimTy(..) => {} def => { maybe_do_stability_check(tcx, def.def_id(), item.span, cb); @@ -646,9 +646,9 @@ pub fn check_pat<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, pat: &hir::Pat, debug!("check_pat(pat = {:?})", pat); if is_internal(tcx, pat.span) { return; } - let v = match tcx.pat_ty_opt(pat) { - Some(&ty::TyS { sty: ty::TyStruct(def, _), .. }) => def.struct_variant(), - Some(_) | None => return, + let v = match tcx.pat_ty_opt(pat).map(|ty| &ty.sty) { + Some(&ty::TyAdt(adt, _)) if !adt.is_enum() => adt.struct_variant(), + _ => return, }; match pat.node { // Foo(a, b, c) @@ -695,10 +695,9 @@ fn is_internal<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, span: Span) -> bool { fn is_staged_api<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefId) -> bool { match tcx.trait_item_of_item(id) { - Some(ty::MethodTraitItemId(trait_method_id)) - if trait_method_id != id => { - is_staged_api(tcx, trait_method_id) - } + Some(trait_method_id) if trait_method_id != id => { + is_staged_api(tcx, trait_method_id) + } _ => { *tcx.stability.borrow_mut().staged_api.entry(id.krate).or_insert_with( || tcx.sess.cstore.is_staged_api(id.krate)) diff --git a/src/librustc/middle/weak_lang_items.rs b/src/librustc/middle/weak_lang_items.rs index 6fb1b16705..c2f275e6de 100644 --- a/src/librustc/middle/weak_lang_items.rs +++ b/src/librustc/middle/weak_lang_items.rs @@ -70,6 +70,7 @@ fn verify(sess: &Session, items: &lang_items::LanguageItems) { let needs_check = sess.crate_types.borrow().iter().any(|kind| { match *kind { config::CrateTypeDylib | + config::CrateTypeRustcMacro | config::CrateTypeCdylib | config::CrateTypeExecutable | config::CrateTypeStaticlib => true, diff --git a/src/librustc/mir/repr.rs b/src/librustc/mir/repr.rs index 2bde3d6554..c55a319d34 100644 --- a/src/librustc/mir/repr.rs +++ b/src/librustc/mir/repr.rs @@ -180,13 +180,39 @@ impl<'tcx> Mir<'tcx> { Some(Local::new(idx)) } - /// Counts the number of locals, such that that local_index + /// Counts the number of locals, such that local_index /// will always return an index smaller than this count. 
pub fn count_locals(&self) -> usize { self.arg_decls.len() + self.var_decls.len() + self.temp_decls.len() + 1 } + + pub fn format_local(&self, local: Local) -> String { + let mut index = local.index(); + index = match index.checked_sub(self.arg_decls.len()) { + None => return format!("{:?}", Arg::new(index)), + Some(index) => index, + }; + index = match index.checked_sub(self.var_decls.len()) { + None => return format!("{:?}", Var::new(index)), + Some(index) => index, + }; + index = match index.checked_sub(self.temp_decls.len()) { + None => return format!("{:?}", Temp::new(index)), + Some(index) => index, + }; + debug_assert!(index == 0); + return "ReturnPointer".to_string() + } + + /// Changes a statement to a nop. This is both faster than deleting instructions and avoids + /// invalidating statement indices in `Location`s. + pub fn make_statement_nop(&mut self, location: Location) { + let block = &mut self[location.block]; + debug_assert!(location.statement_index < block.statements.len()); + block.statements[location.statement_index].make_nop() + } } impl<'tcx> Index for Mir<'tcx> { @@ -686,6 +712,14 @@ pub struct Statement<'tcx> { pub kind: StatementKind<'tcx>, } +impl<'tcx> Statement<'tcx> { + /// Changes a statement to a nop. This is both faster than deleting instructions and avoids + /// invalidating statement indices in `Location`s. + pub fn make_nop(&mut self) { + self.kind = StatementKind::Nop + } +} + #[derive(Clone, Debug, RustcEncodable, RustcDecodable)] pub enum StatementKind<'tcx> { /// Write the RHS Rvalue to the LHS Lvalue. @@ -699,6 +733,9 @@ pub enum StatementKind<'tcx> { /// End the current live range for the storage of the local. StorageDead(Lvalue<'tcx>), + + /// No-op. Useful for deleting instructions without affecting statement indices. + Nop, } impl<'tcx> Debug for Statement<'tcx> { @@ -711,6 +748,7 @@ impl<'tcx> Debug for Statement<'tcx> { SetDiscriminant{lvalue: ref lv, variant_index: index} => { write!(fmt, "discriminant({:?}) = {:?}", lv, index) } + Nop => write!(fmt, "nop"), } } } @@ -824,6 +862,24 @@ impl<'tcx> Lvalue<'tcx> { elem: elem, })) } + + pub fn from_local(mir: &Mir<'tcx>, local: Local) -> Lvalue<'tcx> { + let mut index = local.index(); + index = match index.checked_sub(mir.arg_decls.len()) { + None => return Lvalue::Arg(Arg(index as u32)), + Some(index) => index, + }; + index = match index.checked_sub(mir.var_decls.len()) { + None => return Lvalue::Var(Var(index as u32)), + Some(index) => index, + }; + index = match index.checked_sub(mir.temp_decls.len()) { + None => return Lvalue::Temp(Temp(index as u32)), + Some(index) => index, + }; + debug_assert!(index == 0); + Lvalue::ReturnPointer + } } impl<'tcx> Debug for Lvalue<'tcx> { @@ -911,7 +967,7 @@ pub enum Rvalue<'tcx> { Repeat(Operand<'tcx>, TypedConstVal<'tcx>), /// &x or &mut x - Ref(Region, BorrowKind, Lvalue<'tcx>), + Ref(&'tcx Region, BorrowKind, Lvalue<'tcx>), /// length of a [X] or [X;n] value Len(Lvalue<'tcx>), @@ -962,7 +1018,10 @@ pub enum CastKind { pub enum AggregateKind<'tcx> { Vec, Tuple, - Adt(AdtDef<'tcx>, usize, &'tcx Substs<'tcx>), + /// The second field is variant number (discriminant), it's equal to 0 + /// for struct and union expressions. The fourth field is active field + /// number and is present only for union expressions. 
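[Editor's note, not part of the upstream patch] Both `format_local` and `Lvalue::from_local` above decode a flat local index by peeling off the argument, variable and temporary ranges in order; whatever remains must be the return pointer. A self-contained sketch of that arithmetic with hypothetical names, not the MIR types themselves:

```rust
// Hypothetical local classification; the real code returns Lvalue/Arg/Var/Temp.
#[derive(Debug, PartialEq)]
enum Local {
    Arg(usize),
    Var(usize),
    Temp(usize),
    ReturnPointer,
}

fn decode_local(mut index: usize, args: usize, vars: usize, temps: usize) -> Local {
    // Peel off each range in declaration order; the remainder indexes the next range.
    index = match index.checked_sub(args) {
        None => return Local::Arg(index),
        Some(rest) => rest,
    };
    index = match index.checked_sub(vars) {
        None => return Local::Var(index),
        Some(rest) => rest,
    };
    index = match index.checked_sub(temps) {
        None => return Local::Temp(index),
        Some(rest) => rest,
    };
    // count_locals = args + vars + temps + 1, so only index 0 can be left over.
    debug_assert!(index == 0);
    Local::ReturnPointer
}

fn main() {
    // With 2 args, 3 vars and 1 temp, flat index 4 falls into the var range.
    assert_eq!(decode_local(4, 2, 3, 1), Local::Var(2));
    assert_eq!(decode_local(6, 2, 3, 1), Local::ReturnPointer);
}
```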
+ Adt(AdtDef<'tcx>, usize, &'tcx Substs<'tcx>, Option), Closure(DefId, ClosureSubsts<'tcx>), } @@ -1069,14 +1128,10 @@ impl<'tcx> Debug for Rvalue<'tcx> { } } - Adt(adt_def, variant, substs) => { + Adt(adt_def, variant, substs, _) => { let variant_def = &adt_def.variants[variant]; - ppaux::parameterized(fmt, substs, variant_def.did, - ppaux::Ns::Value, &[], - |tcx| { - Some(tcx.lookup_item_type(variant_def.did).generics) - })?; + ppaux::parameterized(fmt, substs, variant_def.did, &[])?; match variant_def.kind { ty::VariantKind::Unit => Ok(()), @@ -1098,7 +1153,9 @@ impl<'tcx> Debug for Rvalue<'tcx> { tcx.with_freevars(node_id, |freevars| { for (freevar, lv) in freevars.iter().zip(lvs) { - let var_name = tcx.local_var_name_str(freevar.def.var_id()); + let def_id = freevar.def.def_id(); + let var_id = tcx.map.as_local_node_id(def_id).unwrap(); + let var_name = tcx.local_var_name_str(var_id); struct_fmt.field(&var_name, lv); } }); @@ -1169,9 +1226,7 @@ impl<'tcx> Debug for Literal<'tcx> { use self::Literal::*; match *self { Item { def_id, substs } => { - ppaux::parameterized( - fmt, substs, def_id, ppaux::Ns::Value, &[], - |tcx| Some(tcx.lookup_item_type(def_id).generics)) + ppaux::parameterized(fmt, substs, def_id, &[]) } Value { ref value } => { write!(fmt, "const ")?; @@ -1244,3 +1299,29 @@ impl<'a, 'b> GraphSuccessors<'b> for Mir<'a> { type Item = BasicBlock; type Iter = IntoIter; } + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] +pub struct Location { + /// the location is within this block + pub block: BasicBlock, + + /// the location is the start of the this statement; or, if `statement_index` + /// == num-statements, then the start of the terminator. + pub statement_index: usize, +} + +impl fmt::Debug for Location { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "{:?}[{}]", self.block, self.statement_index) + } +} + +impl Location { + pub fn dominates(&self, other: &Location, dominators: &Dominators) -> bool { + if self.block == other.block { + self.statement_index <= other.statement_index + } else { + dominators.is_dominated_by(other.block, self.block) + } + } +} diff --git a/src/librustc/mir/tcx.rs b/src/librustc/mir/tcx.rs index cf91229f1c..74ad6c602f 100644 --- a/src/librustc/mir/tcx.rs +++ b/src/librustc/mir/tcx.rs @@ -40,7 +40,7 @@ impl<'a, 'gcx, 'tcx> LvalueTy<'tcx> { LvalueTy::Ty { ty } => ty, LvalueTy::Downcast { adt_def, substs, variant_index: _ } => - tcx.mk_enum(adt_def, substs), + tcx.mk_adt(adt_def, substs), } } @@ -75,7 +75,8 @@ impl<'a, 'gcx, 'tcx> LvalueTy<'tcx> { } ProjectionElem::Downcast(adt_def1, index) => match self.to_ty(tcx).sty { - ty::TyEnum(adt_def, substs) => { + ty::TyAdt(adt_def, substs) => { + assert!(adt_def.is_enum()); assert!(index < adt_def.variants.len()); assert_eq!(adt_def, adt_def1); LvalueTy::Downcast { adt_def: adt_def, @@ -83,7 +84,7 @@ impl<'a, 'gcx, 'tcx> LvalueTy<'tcx> { variant_index: index } } _ => { - bug!("cannot downcast non-enum type: `{:?}`", self) + bug!("cannot downcast non-ADT type: `{:?}`", self) } }, ProjectionElem::Field(_, fty) => LvalueTy::Ty { ty: fty } @@ -145,8 +146,7 @@ impl<'tcx> Rvalue<'tcx> { } &Rvalue::Ref(reg, bk, ref lv) => { let lv_ty = lv.ty(mir, tcx).to_ty(tcx); - Some(tcx.mk_ref( - tcx.mk_region(reg), + Some(tcx.mk_ref(reg, ty::TypeAndMut { ty: lv_ty, mutbl: bk.to_mutbl_lossy() @@ -154,7 +154,7 @@ impl<'tcx> Rvalue<'tcx> { )) } &Rvalue::Len(..) 
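[Editor's note, not part of the upstream patch] The rule behind the `Location::dominates` helper added in this hunk is simple: within a single block compare statement indices, across blocks defer to the CFG dominator relation. A loose sketch with the dominator query stubbed out as a closure instead of the real `Dominators` type:

```rust
#[derive(Copy, Clone, PartialEq, Debug)]
struct Loc {
    block: usize,
    statement_index: usize,
}

// `block_dominates(a, b)` is assumed to answer "does block `a` dominate block `b`?".
fn dominates(this: Loc, other: Loc, block_dominates: impl Fn(usize, usize) -> bool) -> bool {
    if this.block == other.block {
        this.statement_index <= other.statement_index
    } else {
        block_dominates(this.block, other.block)
    }
}

fn main() {
    // In a straight-line CFG 0 -> 1, lower-numbered blocks dominate higher ones.
    let block_dominates = |a: usize, b: usize| a <= b;
    let early = Loc { block: 0, statement_index: 1 };
    let late = Loc { block: 1, statement_index: 0 };
    assert!(dominates(early, late, &block_dominates));
    assert!(!dominates(late, early, &block_dominates));
}
```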
=> Some(tcx.types.usize), - &Rvalue::Cast(_, _, ty) => Some(ty), + &Rvalue::Cast(.., ty) => Some(ty), &Rvalue::BinaryOp(op, ref lhs, ref rhs) => { let lhs_ty = lhs.ty(mir, tcx); let rhs_ty = rhs.ty(mir, tcx); @@ -188,7 +188,7 @@ impl<'tcx> Rvalue<'tcx> { ops.iter().map(|op| op.ty(mir, tcx)).collect() )) } - AggregateKind::Adt(def, _, substs) => { + AggregateKind::Adt(def, _, substs, _) => { Some(tcx.lookup_item_type(def.did).ty.subst(tcx, substs)) } AggregateKind::Closure(did, substs) => { diff --git a/src/librustc/mir/transform.rs b/src/librustc/mir/transform.rs index 57601e6750..8cd5f5844d 100644 --- a/src/librustc/mir/transform.rs +++ b/src/librustc/mir/transform.rs @@ -15,7 +15,9 @@ use mir::mir_map::MirMap; use mir::repr::{Mir, Promoted}; use ty::TyCtxt; use syntax::ast::NodeId; +use util::common::time; +use std::borrow::Cow; use std::fmt; /// Where a specific Mir comes from. @@ -72,12 +74,12 @@ impl<'a, 'tcx> MirSource { /// Various information about pass. pub trait Pass { // fn should_run(Session) to check if pass should run? - fn name(&self) -> &str { + fn name<'a>(&self) -> Cow<'static, str> { let name = unsafe { ::std::intrinsics::type_name::() }; if let Some(tail) = name.rfind(":") { - &name[tail+1..] + Cow::from(&name[tail+1..]) } else { - name + Cow::from(name) } } fn disambiguator<'a>(&'a self) -> Option> { None } @@ -162,11 +164,10 @@ impl<'a, 'tcx> Passes { } pub fn run_passes(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, map: &mut MirMap<'tcx>) { - for pass in &mut self.plugin_passes { - pass.run_pass(tcx, map, &mut self.pass_hooks); - } - for pass in &mut self.passes { - pass.run_pass(tcx, map, &mut self.pass_hooks); + let Passes { ref mut passes, ref mut plugin_passes, ref mut pass_hooks } = *self; + for pass in plugin_passes.iter_mut().chain(passes.iter_mut()) { + time(tcx.sess.time_passes(), &*pass.name(), + || pass.run_pass(tcx, map, pass_hooks)); } } diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index ead8de86db..2c58d35973 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -103,60 +103,70 @@ macro_rules! 
make_mir_visitor { fn visit_statement(&mut self, block: BasicBlock, - statement: & $($mutability)* Statement<'tcx>) { - self.super_statement(block, statement); + statement: & $($mutability)* Statement<'tcx>, + location: Location) { + self.super_statement(block, statement, location); } fn visit_assign(&mut self, block: BasicBlock, lvalue: & $($mutability)* Lvalue<'tcx>, - rvalue: & $($mutability)* Rvalue<'tcx>) { - self.super_assign(block, lvalue, rvalue); + rvalue: & $($mutability)* Rvalue<'tcx>, + location: Location) { + self.super_assign(block, lvalue, rvalue, location); } fn visit_terminator(&mut self, block: BasicBlock, - terminator: & $($mutability)* Terminator<'tcx>) { - self.super_terminator(block, terminator); + terminator: & $($mutability)* Terminator<'tcx>, + location: Location) { + self.super_terminator(block, terminator, location); } fn visit_terminator_kind(&mut self, block: BasicBlock, - kind: & $($mutability)* TerminatorKind<'tcx>) { - self.super_terminator_kind(block, kind); + kind: & $($mutability)* TerminatorKind<'tcx>, + location: Location) { + self.super_terminator_kind(block, kind, location); } fn visit_assert_message(&mut self, - msg: & $($mutability)* AssertMessage<'tcx>) { - self.super_assert_message(msg); + msg: & $($mutability)* AssertMessage<'tcx>, + location: Location) { + self.super_assert_message(msg, location); } fn visit_rvalue(&mut self, - rvalue: & $($mutability)* Rvalue<'tcx>) { - self.super_rvalue(rvalue); + rvalue: & $($mutability)* Rvalue<'tcx>, + location: Location) { + self.super_rvalue(rvalue, location); } fn visit_operand(&mut self, - operand: & $($mutability)* Operand<'tcx>) { - self.super_operand(operand); + operand: & $($mutability)* Operand<'tcx>, + location: Location) { + self.super_operand(operand, location); } fn visit_lvalue(&mut self, lvalue: & $($mutability)* Lvalue<'tcx>, - context: LvalueContext) { - self.super_lvalue(lvalue, context); + context: LvalueContext<'tcx>, + location: Location) { + self.super_lvalue(lvalue, context, location); } fn visit_projection(&mut self, lvalue: & $($mutability)* LvalueProjection<'tcx>, - context: LvalueContext) { - self.super_projection(lvalue, context); + context: LvalueContext, + location: Location) { + self.super_projection(lvalue, context, location); } fn visit_projection_elem(&mut self, lvalue: & $($mutability)* LvalueElem<'tcx>, - context: LvalueContext) { - self.super_projection_elem(lvalue, context); + context: LvalueContext, + location: Location) { + self.super_projection_elem(lvalue, context, location); } fn visit_branch(&mut self, @@ -166,17 +176,20 @@ macro_rules! make_mir_visitor { } fn visit_constant(&mut self, - constant: & $($mutability)* Constant<'tcx>) { - self.super_constant(constant); + constant: & $($mutability)* Constant<'tcx>, + location: Location) { + self.super_constant(constant, location); } fn visit_literal(&mut self, - literal: & $($mutability)* Literal<'tcx>) { - self.super_literal(literal); + literal: & $($mutability)* Literal<'tcx>, + location: Location) { + self.super_literal(literal, location); } fn visit_def_id(&mut self, - def_id: & $($mutability)* DefId) { + def_id: & $($mutability)* DefId, + _: Location) { self.super_def_id(def_id); } @@ -206,18 +219,21 @@ macro_rules! 
make_mir_visitor { } fn visit_const_val(&mut self, - const_val: & $($mutability)* ConstVal) { + const_val: & $($mutability)* ConstVal, + _: Location) { self.super_const_val(const_val); } fn visit_const_usize(&mut self, - const_usize: & $($mutability)* ConstUsize) { + const_usize: & $($mutability)* ConstUsize, + _: Location) { self.super_const_usize(const_usize); } fn visit_typed_const_val(&mut self, - val: & $($mutability)* TypedConstVal<'tcx>) { - self.super_typed_const_val(val); + val: & $($mutability)* TypedConstVal<'tcx>, + location: Location) { + self.super_typed_const_val(val, location); } fn visit_var_decl(&mut self, @@ -280,12 +296,16 @@ macro_rules! make_mir_visitor { is_cleanup: _ } = *data; + let mut index = 0; for statement in statements { - self.visit_statement(block, statement); + let location = Location { block: block, statement_index: index }; + self.visit_statement(block, statement, location); + index += 1; } if let Some(ref $($mutability)* terminator) = *terminator { - self.visit_terminator(block, terminator); + let location = Location { block: block, statement_index: index }; + self.visit_terminator(block, terminator, location); } } @@ -304,7 +324,8 @@ macro_rules! make_mir_visitor { fn super_statement(&mut self, block: BasicBlock, - statement: & $($mutability)* Statement<'tcx>) { + statement: & $($mutability)* Statement<'tcx>, + location: Location) { let Statement { ref $($mutability)* source_info, ref $($mutability)* kind, @@ -314,43 +335,47 @@ macro_rules! make_mir_visitor { match *kind { StatementKind::Assign(ref $($mutability)* lvalue, ref $($mutability)* rvalue) => { - self.visit_assign(block, lvalue, rvalue); + self.visit_assign(block, lvalue, rvalue, location); } StatementKind::SetDiscriminant{ ref $($mutability)* lvalue, .. } => { - self.visit_lvalue(lvalue, LvalueContext::Store); + self.visit_lvalue(lvalue, LvalueContext::Store, location); } StatementKind::StorageLive(ref $($mutability)* lvalue) => { - self.visit_lvalue(lvalue, LvalueContext::StorageLive); + self.visit_lvalue(lvalue, LvalueContext::StorageLive, location); } StatementKind::StorageDead(ref $($mutability)* lvalue) => { - self.visit_lvalue(lvalue, LvalueContext::StorageDead); + self.visit_lvalue(lvalue, LvalueContext::StorageDead, location); } + StatementKind::Nop => {} } } fn super_assign(&mut self, _block: BasicBlock, lvalue: &$($mutability)* Lvalue<'tcx>, - rvalue: &$($mutability)* Rvalue<'tcx>) { - self.visit_lvalue(lvalue, LvalueContext::Store); - self.visit_rvalue(rvalue); + rvalue: &$($mutability)* Rvalue<'tcx>, + location: Location) { + self.visit_lvalue(lvalue, LvalueContext::Store, location); + self.visit_rvalue(rvalue, location); } fn super_terminator(&mut self, block: BasicBlock, - terminator: &$($mutability)* Terminator<'tcx>) { + terminator: &$($mutability)* Terminator<'tcx>, + location: Location) { let Terminator { ref $($mutability)* source_info, ref $($mutability)* kind, } = *terminator; self.visit_source_info(source_info); - self.visit_terminator_kind(block, kind); + self.visit_terminator_kind(block, kind, location); } fn super_terminator_kind(&mut self, block: BasicBlock, - kind: & $($mutability)* TerminatorKind<'tcx>) { + kind: & $($mutability)* TerminatorKind<'tcx>, + source_location: Location) { match *kind { TerminatorKind::Goto { target } => { self.visit_branch(block, target); @@ -358,7 +383,7 @@ macro_rules! 
make_mir_visitor { TerminatorKind::If { ref $($mutability)* cond, ref $($mutability)* targets } => { - self.visit_operand(cond); + self.visit_operand(cond, source_location); for &target in targets.as_slice() { self.visit_branch(block, target); } @@ -367,7 +392,7 @@ macro_rules! make_mir_visitor { TerminatorKind::Switch { ref $($mutability)* discr, adt_def: _, ref targets } => { - self.visit_lvalue(discr, LvalueContext::Inspect); + self.visit_lvalue(discr, LvalueContext::Inspect, source_location); for &target in targets { self.visit_branch(block, target); } @@ -377,10 +402,10 @@ macro_rules! make_mir_visitor { ref $($mutability)* switch_ty, ref $($mutability)* values, ref targets } => { - self.visit_lvalue(discr, LvalueContext::Inspect); + self.visit_lvalue(discr, LvalueContext::Inspect, source_location); self.visit_ty(switch_ty); for value in values { - self.visit_const_val(value); + self.visit_const_val(value, source_location); } for &target in targets { self.visit_branch(block, target); @@ -395,7 +420,7 @@ macro_rules! make_mir_visitor { TerminatorKind::Drop { ref $($mutability)* location, target, unwind } => { - self.visit_lvalue(location, LvalueContext::Drop); + self.visit_lvalue(location, LvalueContext::Drop, source_location); self.visit_branch(block, target); unwind.map(|t| self.visit_branch(block, t)); } @@ -404,8 +429,8 @@ macro_rules! make_mir_visitor { ref $($mutability)* value, target, unwind } => { - self.visit_lvalue(location, LvalueContext::Drop); - self.visit_operand(value); + self.visit_lvalue(location, LvalueContext::Drop, source_location); + self.visit_operand(value, source_location); self.visit_branch(block, target); unwind.map(|t| self.visit_branch(block, t)); } @@ -414,12 +439,12 @@ macro_rules! make_mir_visitor { ref $($mutability)* args, ref $($mutability)* destination, cleanup } => { - self.visit_operand(func); + self.visit_operand(func, source_location); for arg in args { - self.visit_operand(arg); + self.visit_operand(arg, source_location); } if let Some((ref $($mutability)* destination, target)) = *destination { - self.visit_lvalue(destination, LvalueContext::Call); + self.visit_lvalue(destination, LvalueContext::Call, source_location); self.visit_branch(block, target); } cleanup.map(|t| self.visit_branch(block, t)); @@ -430,8 +455,8 @@ macro_rules! make_mir_visitor { ref $($mutability)* msg, target, cleanup } => { - self.visit_operand(cond); - self.visit_assert_message(msg); + self.visit_operand(cond, source_location); + self.visit_assert_message(msg, source_location); self.visit_branch(block, target); cleanup.map(|t| self.visit_branch(block, t)); } @@ -439,47 +464,49 @@ macro_rules! 
make_mir_visitor { } fn super_assert_message(&mut self, - msg: & $($mutability)* AssertMessage<'tcx>) { + msg: & $($mutability)* AssertMessage<'tcx>, + location: Location) { match *msg { AssertMessage::BoundsCheck { ref $($mutability)* len, ref $($mutability)* index } => { - self.visit_operand(len); - self.visit_operand(index); + self.visit_operand(len, location); + self.visit_operand(index, location); } AssertMessage::Math(_) => {} } } fn super_rvalue(&mut self, - rvalue: & $($mutability)* Rvalue<'tcx>) { + rvalue: & $($mutability)* Rvalue<'tcx>, + location: Location) { match *rvalue { Rvalue::Use(ref $($mutability)* operand) => { - self.visit_operand(operand); + self.visit_operand(operand, location); } Rvalue::Repeat(ref $($mutability)* value, ref $($mutability)* typed_const_val) => { - self.visit_operand(value); - self.visit_typed_const_val(typed_const_val); + self.visit_operand(value, location); + self.visit_typed_const_val(typed_const_val, location); } Rvalue::Ref(r, bk, ref $($mutability)* path) => { self.visit_lvalue(path, LvalueContext::Borrow { region: r, kind: bk - }); + }, location); } Rvalue::Len(ref $($mutability)* path) => { - self.visit_lvalue(path, LvalueContext::Inspect); + self.visit_lvalue(path, LvalueContext::Inspect, location); } Rvalue::Cast(_cast_kind, ref $($mutability)* operand, ref $($mutability)* ty) => { - self.visit_operand(operand); + self.visit_operand(operand, location); self.visit_ty(ty); } @@ -489,12 +516,12 @@ macro_rules! make_mir_visitor { Rvalue::CheckedBinaryOp(_bin_op, ref $($mutability)* lhs, ref $($mutability)* rhs) => { - self.visit_operand(lhs); - self.visit_operand(rhs); + self.visit_operand(lhs, location); + self.visit_operand(rhs, location); } Rvalue::UnaryOp(_un_op, ref $($mutability)* op) => { - self.visit_operand(op); + self.visit_operand(op, location); } Rvalue::Box(ref $($mutability)* ty) => { @@ -510,18 +537,19 @@ macro_rules! make_mir_visitor { } AggregateKind::Adt(_adt_def, _variant_index, - ref $($mutability)* substs) => { + ref $($mutability)* substs, + _active_field_index) => { self.visit_substs(substs); } AggregateKind::Closure(ref $($mutability)* def_id, ref $($mutability)* closure_substs) => { - self.visit_def_id(def_id); + self.visit_def_id(def_id, location); self.visit_closure_substs(closure_substs); } } for operand in operands { - self.visit_operand(operand); + self.visit_operand(operand, location); } } @@ -529,30 +557,32 @@ macro_rules! make_mir_visitor { ref $($mutability)* inputs, asm: _ } => { for output in & $($mutability)* outputs[..] { - self.visit_lvalue(output, LvalueContext::Store); + self.visit_lvalue(output, LvalueContext::Store, location); } for input in & $($mutability)* inputs[..] { - self.visit_operand(input); + self.visit_operand(input, location); } } } } fn super_operand(&mut self, - operand: & $($mutability)* Operand<'tcx>) { + operand: & $($mutability)* Operand<'tcx>, + location: Location) { match *operand { Operand::Consume(ref $($mutability)* lvalue) => { - self.visit_lvalue(lvalue, LvalueContext::Consume); + self.visit_lvalue(lvalue, LvalueContext::Consume, location); } Operand::Constant(ref $($mutability)* constant) => { - self.visit_constant(constant); + self.visit_constant(constant, location); } } } fn super_lvalue(&mut self, lvalue: & $($mutability)* Lvalue<'tcx>, - context: LvalueContext) { + context: LvalueContext<'tcx>, + location: Location) { match *lvalue { Lvalue::Var(_) | Lvalue::Temp(_) | @@ -560,28 +590,35 @@ macro_rules! 
make_mir_visitor { Lvalue::ReturnPointer => { } Lvalue::Static(ref $($mutability)* def_id) => { - self.visit_def_id(def_id); + self.visit_def_id(def_id, location); } Lvalue::Projection(ref $($mutability)* proj) => { - self.visit_projection(proj, context); + self.visit_projection(proj, context, location); } } } fn super_projection(&mut self, proj: & $($mutability)* LvalueProjection<'tcx>, - context: LvalueContext) { + context: LvalueContext, + location: Location) { let Projection { ref $($mutability)* base, ref $($mutability)* elem, } = *proj; - self.visit_lvalue(base, LvalueContext::Projection); - self.visit_projection_elem(elem, context); + let context = if context.is_mutating_use() { + LvalueContext::Projection(Mutability::Mut) + } else { + LvalueContext::Projection(Mutability::Not) + }; + self.visit_lvalue(base, context, location); + self.visit_projection_elem(elem, context, location); } fn super_projection_elem(&mut self, proj: & $($mutability)* LvalueElem<'tcx>, - _context: LvalueContext) { + _context: LvalueContext, + location: Location) { match *proj { ProjectionElem::Deref => { } @@ -591,7 +628,7 @@ macro_rules! make_mir_visitor { self.visit_ty(ty); } ProjectionElem::Index(ref $($mutability)* operand) => { - self.visit_operand(operand); + self.visit_operand(operand, location); } ProjectionElem::ConstantIndex { offset: _, min_length: _, @@ -645,7 +682,8 @@ macro_rules! make_mir_visitor { } fn super_constant(&mut self, - constant: & $($mutability)* Constant<'tcx>) { + constant: & $($mutability)* Constant<'tcx>, + location: Location) { let Constant { ref $($mutability)* span, ref $($mutability)* ty, @@ -654,11 +692,12 @@ macro_rules! make_mir_visitor { self.visit_span(span); self.visit_ty(ty); - self.visit_literal(literal); + self.visit_literal(literal, location); } fn super_typed_const_val(&mut self, - constant: & $($mutability)* TypedConstVal<'tcx>) { + constant: & $($mutability)* TypedConstVal<'tcx>, + location: Location) { let TypedConstVal { ref $($mutability)* span, ref $($mutability)* ty, @@ -667,19 +706,20 @@ macro_rules! make_mir_visitor { self.visit_span(span); self.visit_ty(ty); - self.visit_const_usize(value); + self.visit_const_usize(value, location); } fn super_literal(&mut self, - literal: & $($mutability)* Literal<'tcx>) { + literal: & $($mutability)* Literal<'tcx>, + location: Location) { match *literal { Literal::Item { ref $($mutability)* def_id, ref $($mutability)* substs } => { - self.visit_def_id(def_id); + self.visit_def_id(def_id, location); self.visit_substs(substs); } Literal::Value { ref $($mutability)* value } => { - self.visit_const_val(value); + self.visit_const_val(value, location); } Literal::Promoted { index: _ } => {} } @@ -716,6 +756,21 @@ macro_rules! 
make_mir_visitor { fn super_const_usize(&mut self, _substs: & $($mutability)* ConstUsize) { } + + // Convenience methods + + fn visit_location(&mut self, mir: & $($mutability)* Mir<'tcx>, location: Location) { + let basic_block = & $($mutability)* mir[location.block]; + if basic_block.statements.len() == location.statement_index { + if let Some(ref $($mutability)* terminator) = basic_block.terminator { + self.visit_terminator(location.block, terminator, location) + } + } else { + let statement = & $($mutability)* + basic_block.statements[location.statement_index]; + self.visit_statement(location.block, statement, location) + } + } } } } @@ -724,7 +779,7 @@ make_mir_visitor!(Visitor,); make_mir_visitor!(MutVisitor,mut); #[derive(Copy, Clone, Debug)] -pub enum LvalueContext { +pub enum LvalueContext<'tcx> { // Appears as LHS of an assignment Store, @@ -738,13 +793,22 @@ pub enum LvalueContext { Inspect, // Being borrowed - Borrow { region: Region, kind: BorrowKind }, - - // Being sliced -- this should be same as being borrowed, probably - Slice { from_start: usize, from_end: usize }, - - // Used as base for another lvalue, e.g. `x` in `x.y` - Projection, + Borrow { region: &'tcx Region, kind: BorrowKind }, + + // Used as base for another lvalue, e.g. `x` in `x.y`. + // + // The `Mutability` argument specifies whether the projection is being performed in order to + // (potentially) mutate the lvalue. For example, the projection `x.y` is marked as a mutation + // in these cases: + // + // x.y = ...; + // f(&mut x.y); + // + // But not in these cases: + // + // z = x.y; + // f(&x.y); + Projection(Mutability), // Consumed as part of an operand Consume, @@ -753,3 +817,69 @@ pub enum LvalueContext { StorageLive, StorageDead, } + +impl<'tcx> LvalueContext<'tcx> { + /// Returns true if this lvalue context represents a drop. + pub fn is_drop(&self) -> bool { + match *self { + LvalueContext::Drop => true, + _ => false, + } + } + + /// Returns true if this lvalue context represents a storage live or storage dead marker. + pub fn is_storage_marker(&self) -> bool { + match *self { + LvalueContext::StorageLive | LvalueContext::StorageDead => true, + _ => false, + } + } + + /// Returns true if this lvalue context represents a storage live marker. + pub fn is_storage_live_marker(&self) -> bool { + match *self { + LvalueContext::StorageLive => true, + _ => false, + } + } + + /// Returns true if this lvalue context represents a storage dead marker. + pub fn is_storage_dead_marker(&self) -> bool { + match *self { + LvalueContext::StorageDead => true, + _ => false, + } + } + + /// Returns true if this lvalue context represents a use that potentially changes the value. + pub fn is_mutating_use(&self) -> bool { + match *self { + LvalueContext::Store | LvalueContext::Call | + LvalueContext::Borrow { kind: BorrowKind::Mut, .. } | + LvalueContext::Projection(Mutability::Mut) | + LvalueContext::Drop => true, + LvalueContext::Inspect | + LvalueContext::Borrow { kind: BorrowKind::Shared, .. } | + LvalueContext::Borrow { kind: BorrowKind::Unique, .. } | + LvalueContext::Projection(Mutability::Not) | LvalueContext::Consume | + LvalueContext::StorageLive | LvalueContext::StorageDead => false, + } + } + + /// Returns true if this lvalue context represents a use that does not change the value. + pub fn is_nonmutating_use(&self) -> bool { + match *self { + LvalueContext::Inspect | LvalueContext::Borrow { kind: BorrowKind::Shared, .. } | + LvalueContext::Borrow { kind: BorrowKind::Unique, .. 
} | + LvalueContext::Projection(Mutability::Not) | LvalueContext::Consume => true, + LvalueContext::Borrow { kind: BorrowKind::Mut, .. } | LvalueContext::Store | + LvalueContext::Call | LvalueContext::Projection(Mutability::Mut) | + LvalueContext::Drop | LvalueContext::StorageLive | LvalueContext::StorageDead => false, + } + } + + pub fn is_use(&self) -> bool { + self.is_mutating_use() || self.is_nonmutating_use() + } +} + diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index e988ddcd97..5c0f718ad2 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -25,7 +25,6 @@ use middle::cstore; use syntax::ast::{self, IntTy, UintTy}; use syntax::attr; -use syntax::attr::AttrMetaMethods; use syntax::parse; use syntax::parse::token::InternedString; use syntax::feature_gate::UnstableFeatures; @@ -38,9 +37,9 @@ use std::collections::btree_map::Iter as BTreeMapIter; use std::collections::btree_map::Keys as BTreeMapKeysIter; use std::collections::btree_map::Values as BTreeMapValuesIter; -use std::env; use std::fmt; -use std::hash::{Hasher, SipHasher}; +use std::hash::Hasher; +use std::collections::hash_map::DefaultHasher; use std::iter::FromIterator; use std::path::PathBuf; @@ -213,7 +212,7 @@ macro_rules! top_level_options { $warn_text, self.error_format)*]); })* - let mut hasher = SipHasher::new(); + let mut hasher = DefaultHasher::new(); dep_tracking::stable_hash(sub_hashes, &mut hasher, self.error_format); @@ -476,6 +475,7 @@ pub enum CrateType { CrateTypeRlib, CrateTypeStaticlib, CrateTypeCdylib, + CrateTypeRustcMacro, } #[derive(Clone, Hash)] @@ -493,7 +493,7 @@ impl Passes { } } -#[derive(Clone, PartialEq, Hash)] +#[derive(Clone, PartialEq, Hash, RustcEncodable, RustcDecodable)] pub enum PanicStrategy { Unwind, Abort, @@ -581,7 +581,7 @@ macro_rules! options { impl<'a> dep_tracking::DepTrackingHash for $struct_name { - fn hash(&self, hasher: &mut SipHasher, error_format: ErrorOutputType) { + fn hash(&self, hasher: &mut DefaultHasher, error_format: ErrorOutputType) { let mut sub_hashes = BTreeMap::new(); $({ hash_option!($opt, @@ -605,9 +605,8 @@ macro_rules! options { pub const parse_bool: Option<&'static str> = None; pub const parse_opt_bool: Option<&'static str> = Some("one of: `y`, `yes`, `on`, `n`, `no`, or `off`"); - pub const parse_all_bool: Option<&'static str> = - Some("one of: `y`, `yes`, `on`, `n`, `no`, or `off`"); pub const parse_string: Option<&'static str> = Some("a string"); + pub const parse_string_push: Option<&'static str> = Some("a string"); pub const parse_opt_string: Option<&'static str> = Some("a string"); pub const parse_list: Option<&'static str> = Some("a space-separated list of strings"); pub const parse_opt_list: Option<&'static str> = Some("a space-separated list of strings"); @@ -656,25 +655,6 @@ macro_rules! options { } } - fn parse_all_bool(slot: &mut bool, v: Option<&str>) -> bool { - match v { - Some(s) => { - match s { - "n" | "no" | "off" => { - *slot = false; - } - "y" | "yes" | "on" => { - *slot = true; - } - _ => { return false; } - } - - true - }, - None => { *slot = true; true } - } - } - fn parse_opt_string(slot: &mut Option, v: Option<&str>) -> bool { match v { Some(s) => { *slot = Some(s.to_string()); true }, @@ -689,6 +669,13 @@ macro_rules! 
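[Editor's note, not part of the upstream patch] Circling back to the `LvalueContext` helpers added earlier in the mir/visit.rs hunk: they partition every context into mutating uses, non-mutating uses, and storage markers that are not uses at all. A compressed sketch of that classification with a hypothetical enum (the real type also carries borrow regions and projection mutability):

```rust
#[derive(Copy, Clone)]
enum UseContext {
    Store,        // LHS of an assignment
    Call,         // destination of a call
    Drop,         // being dropped
    MutBorrow,    // &mut x
    SharedBorrow, // &x
    Inspect,      // e.g. switch/match discriminant
    Consume,      // moved or copied as an operand
    StorageLive,  // storage marker, not a use of the value
    StorageDead,
}

impl UseContext {
    fn is_mutating_use(self) -> bool {
        match self {
            UseContext::Store | UseContext::Call | UseContext::Drop | UseContext::MutBorrow => true,
            _ => false,
        }
    }

    fn is_nonmutating_use(self) -> bool {
        match self {
            UseContext::SharedBorrow | UseContext::Inspect | UseContext::Consume => true,
            _ => false,
        }
    }

    fn is_use(self) -> bool {
        self.is_mutating_use() || self.is_nonmutating_use()
    }
}

fn main() {
    assert!(UseContext::Store.is_mutating_use());
    assert!(UseContext::SharedBorrow.is_nonmutating_use());
    assert!(!UseContext::StorageDead.is_use());
}
```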
options { } } + fn parse_string_push(slot: &mut Vec, v: Option<&str>) -> bool { + match v { + Some(s) => { slot.push(s.to_string()); true }, + None => false, + } + } + fn parse_list(slot: &mut Vec, v: Option<&str>) -> bool { match v { @@ -764,6 +751,8 @@ options! {CodegenOptions, CodegenSetter, basic_codegen_options, "tool to assemble archives with"), linker: Option = (None, parse_opt_string, [UNTRACKED], "system linker to link outputs with"), + link_arg: Vec = (vec![], parse_string_push, [UNTRACKED], + "a single extra argument to pass to the linker (can be used several times)"), link_args: Option> = (None, parse_opt_list, [UNTRACKED], "extra arguments to pass to the linker (space separated)"), link_dead_code: bool = (false, parse_bool, [UNTRACKED], @@ -791,7 +780,7 @@ options! {CodegenOptions, CodegenSetter, basic_codegen_options, no_vectorize_slp: bool = (false, parse_bool, [TRACKED], "don't run LLVM's SLP vectorization pass"), soft_float: bool = (false, parse_bool, [TRACKED], - "generate software floating point library calls"), + "use soft float ABI (*eabihf targets only)"), prefer_dynamic: bool = (false, parse_bool, [TRACKED], "prefer dynamic linking to static linking"), no_integrated_as: bool = (false, parse_bool, [TRACKED], @@ -870,9 +859,13 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, ls: bool = (false, parse_bool, [UNTRACKED], "list the symbols defined by a library crate"), save_analysis: bool = (false, parse_bool, [UNTRACKED], - "write syntax and type analysis (in JSON format) information in addition to normal output"), + "write syntax and type analysis (in JSON format) information, in \ + addition to normal output"), save_analysis_csv: bool = (false, parse_bool, [UNTRACKED], - "write syntax and type analysis (in CSV format) information in addition to normal output"), + "write syntax and type analysis (in CSV format) information, in addition to normal output"), + save_analysis_api: bool = (false, parse_bool, [UNTRACKED], + "write syntax and type analysis information for opaque libraries (in JSON format), \ + in addition to normal output"), print_move_fragments: bool = (false, parse_bool, [UNTRACKED], "print out move-fragment data for every fn"), flowgraph_print_loans: bool = (false, parse_bool, [UNTRACKED], @@ -910,10 +903,10 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "adds unstable command line options to rustc interface"), force_overflow_checks: Option = (None, parse_opt_bool, [TRACKED], "force overflow checks on or off"), - force_dropflag_checks: Option = (None, parse_opt_bool, [TRACKED], - "force drop flag checks on or off"), trace_macros: bool = (false, parse_bool, [UNTRACKED], "for every macro invocation, print its name and arguments"), + debug_macros: bool = (false, parse_bool, [TRACKED], + "emit line numbers debug info inside macros"), enable_nonzeroing_move_hints: bool = (false, parse_bool, [TRACKED], "force nonzeroing move optimization on"), keep_hygiene_data: bool = (false, parse_bool, [UNTRACKED], @@ -930,8 +923,8 @@ options! 
{DebuggingOptions, DebuggingSetter, basic_debugging_options, "dump MIR state at various points in translation"), dump_mir_dir: Option = (None, parse_opt_string, [UNTRACKED], "the directory the MIR is dumped into"), - orbit: bool = (true, parse_all_bool, [UNTRACKED], - "get MIR where it belongs - everywhere; most importantly, in orbit"), + perf_stats: bool = (false, parse_bool, [UNTRACKED], + "print some performance-related statistics"), } pub fn default_lib_output() -> CrateType { @@ -986,6 +979,9 @@ pub fn default_configuration(sess: &Session) -> ast::CrateConfig { if sess.opts.debug_assertions { ret.push(attr::mk_word_item(InternedString::new("debug_assertions"))); } + if sess.opts.crate_types.contains(&CrateTypeRustcMacro) { + ret.push(attr::mk_word_item(InternedString::new("rustc_macro"))); + } return ret; } @@ -1324,15 +1320,7 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) }) }); - let mut debugging_opts = build_debugging_options(matches, error_format); - - // Incremental compilation only works reliably when translation is done via - // MIR, so let's enable -Z orbit if necessary (see #34973). - if debugging_opts.incremental.is_some() && !debugging_opts.orbit { - early_warn(error_format, "Automatically enabling `-Z orbit` because \ - `-Z incremental` was specified"); - debugging_opts.orbit = true; - } + let debugging_opts = build_debugging_options(matches, error_format); let mir_opt_level = debugging_opts.mir_opt_level.unwrap_or(1); @@ -1547,27 +1535,12 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) crate_name: crate_name, alt_std_name: None, libs: libs, - unstable_features: get_unstable_features_setting(), + unstable_features: UnstableFeatures::from_environment(), debug_assertions: debug_assertions, }, cfg) } -pub fn get_unstable_features_setting() -> UnstableFeatures { - // Whether this is a feature-staged build, i.e. 
on the beta or stable channel - let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some(); - // The secret key needed to get through the rustc build itself by - // subverting the unstable features lints - let bootstrap_secret_key = option_env!("CFG_BOOTSTRAP_KEY"); - // The matching key to the above, only known by the build system - let bootstrap_provided_key = env::var("RUSTC_BOOTSTRAP_KEY").ok(); - match (disable_unstable_features, bootstrap_secret_key, bootstrap_provided_key) { - (_, Some(ref s), Some(ref p)) if s == p => UnstableFeatures::Cheat, - (true, _, _) => UnstableFeatures::Disallow, - (false, _, _) => UnstableFeatures::Allow - } -} - pub fn parse_crate_types_from_list(list_list: Vec) -> Result, String> { let mut crate_types: Vec = Vec::new(); for unparsed_crate_type in &list_list { @@ -1579,6 +1552,7 @@ pub fn parse_crate_types_from_list(list_list: Vec) -> Result CrateTypeDylib, "cdylib" => CrateTypeCdylib, "bin" => CrateTypeExecutable, + "rustc-macro" => CrateTypeRustcMacro, _ => { return Err(format!("unknown crate type: `{}`", part)); @@ -1596,7 +1570,7 @@ pub fn parse_crate_types_from_list(list_list: Vec) -> Result bool { @@ -1604,18 +1578,13 @@ pub mod nightly_options { } pub fn is_nightly_build() -> bool { - match get_unstable_features_setting() { - UnstableFeatures::Allow | UnstableFeatures::Cheat => true, - _ => false, - } + UnstableFeatures::from_environment().is_nightly_build() } pub fn check_nightly_options(matches: &getopts::Matches, flags: &[RustcOptGroup]) { let has_z_unstable_option = matches.opt_strs("Z").iter().any(|x| *x == "unstable-options"); - let really_allows_unstable_options = match get_unstable_features_setting() { - UnstableFeatures::Disallow => false, - _ => true, - }; + let really_allows_unstable_options = UnstableFeatures::from_environment() + .is_nightly_build(); for opt in flags.iter() { if opt.stability == OptionStability::Stable { @@ -1667,6 +1636,7 @@ impl fmt::Display for CrateType { CrateTypeRlib => "rlib".fmt(f), CrateTypeStaticlib => "staticlib".fmt(f), CrateTypeCdylib => "cdylib".fmt(f), + CrateTypeRustcMacro => "rustc-macro".fmt(f), } } } @@ -1694,20 +1664,21 @@ mod dep_tracking { use middle::cstore; use session::search_paths::{PathKind, SearchPaths}; use std::collections::BTreeMap; - use std::hash::{Hash, SipHasher}; + use std::hash::Hash; use std::path::PathBuf; use super::{Passes, PanicStrategy, CrateType, OptLevel, DebugInfoLevel, OutputTypes, Externs, ErrorOutputType}; + use std::collections::hash_map::DefaultHasher; use syntax::feature_gate::UnstableFeatures; pub trait DepTrackingHash { - fn hash(&self, &mut SipHasher, ErrorOutputType); + fn hash(&self, &mut DefaultHasher, ErrorOutputType); } macro_rules! impl_dep_tracking_hash_via_hash { ($t:ty) => ( impl DepTrackingHash for $t { - fn hash(&self, hasher: &mut SipHasher, _: ErrorOutputType) { + fn hash(&self, hasher: &mut DefaultHasher, _: ErrorOutputType) { Hash::hash(self, hasher); } } @@ -1717,7 +1688,7 @@ mod dep_tracking { macro_rules! 
impl_dep_tracking_hash_for_sortable_vec_of { ($t:ty) => ( impl DepTrackingHash for Vec<$t> { - fn hash(&self, hasher: &mut SipHasher, error_format: ErrorOutputType) { + fn hash(&self, hasher: &mut DefaultHasher, error_format: ErrorOutputType) { let mut elems: Vec<&$t> = self.iter().collect(); elems.sort(); Hash::hash(&elems.len(), hasher); @@ -1755,7 +1726,7 @@ mod dep_tracking { impl_dep_tracking_hash_for_sortable_vec_of!((String, cstore::NativeLibraryKind)); impl DepTrackingHash for SearchPaths { - fn hash(&self, hasher: &mut SipHasher, _: ErrorOutputType) { + fn hash(&self, hasher: &mut DefaultHasher, _: ErrorOutputType) { let mut elems: Vec<_> = self .iter(PathKind::All) .collect(); @@ -1768,7 +1739,7 @@ mod dep_tracking { where T1: DepTrackingHash, T2: DepTrackingHash { - fn hash(&self, hasher: &mut SipHasher, error_format: ErrorOutputType) { + fn hash(&self, hasher: &mut DefaultHasher, error_format: ErrorOutputType) { Hash::hash(&0, hasher); DepTrackingHash::hash(&self.0, hasher, error_format); Hash::hash(&1, hasher); @@ -1778,7 +1749,7 @@ mod dep_tracking { // This is a stable hash because BTreeMap is a sorted container pub fn stable_hash(sub_hashes: BTreeMap<&'static str, &DepTrackingHash>, - hasher: &mut SipHasher, + hasher: &mut DefaultHasher, error_format: ErrorOutputType) { for (key, sub_hash) in sub_hashes { // Using Hash::hash() instead of DepTrackingHash::hash() is fine for @@ -1804,8 +1775,9 @@ mod tests { use std::path::PathBuf; use std::rc::Rc; use super::{OutputType, OutputTypes, Externs, PanicStrategy}; - use syntax::attr; - use syntax::attr::AttrMetaMethods; + use syntax::{ast, attr}; + use syntax::parse::token::InternedString; + use syntax::codemap::dummy_spanned; fn optgroups() -> Vec { super::rustc_optgroups().into_iter() @@ -1834,7 +1806,9 @@ mod tests { let (sessopts, cfg) = build_session_options_and_crate_config(matches); let sess = build_session(sessopts, &dep_graph, None, registry, Rc::new(DummyCrateStore)); let cfg = build_configuration(&sess, cfg); - assert!((attr::contains_name(&cfg[..], "test"))); + assert!(attr::contains(&cfg, &dummy_spanned(ast::MetaItemKind::Word({ + InternedString::new("test") + })))); } // When the user supplies --test and --cfg test, don't implicitly add @@ -2388,6 +2362,8 @@ mod tests { assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); opts.debugging_opts.save_analysis_csv = true; assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.save_analysis_api = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); opts.debugging_opts.print_move_fragments = true; assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); opts.debugging_opts.flowgraph_print_loans = true; @@ -2424,8 +2400,6 @@ mod tests { assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); opts.debugging_opts.dump_mir_dir = Some(String::from("abc")); assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); - opts.debugging_opts.orbit = false; - assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); // Make sure changing a [TRACKED] option changes the hash opts = reference.clone(); @@ -2460,10 +2434,6 @@ mod tests { opts.debugging_opts.force_overflow_checks = Some(true); assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); - opts = reference.clone(); - opts.debugging_opts.force_dropflag_checks = Some(true); - assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); - opts = reference.clone(); 
opts.debugging_opts.enable_nonzeroing_move_hints = true; assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); diff --git a/src/librustc/session/mod.rs b/src/librustc/session/mod.rs index c71253aee5..268dbd70bb 100644 --- a/src/librustc/session/mod.rs +++ b/src/librustc/session/mod.rs @@ -9,7 +9,7 @@ // except according to those terms. use dep_graph::DepGraph; -use hir::def_id::DefIndex; +use hir::def_id::{CrateNum, DefIndex}; use hir::svh::Svh; use lint; use middle::cstore::CrateStore; @@ -18,9 +18,10 @@ use session::search_paths::PathKind; use session::config::{DebugInfoLevel, PanicStrategy}; use ty::tls; use util::nodemap::{NodeMap, FnvHashMap}; +use util::common::duration_to_secs_str; use mir::transform as mir_pass; -use syntax::ast::{NodeId, Name}; +use syntax::ast::NodeId; use errors::{self, DiagnosticBuilder}; use errors::emitter::{Emitter, EmitterWriter}; use syntax::json::JsonEmitter; @@ -33,15 +34,17 @@ use syntax::feature_gate::AttributeType; use syntax_pos::{Span, MultiSpan}; use rustc_back::target::Target; +use rustc_data_structures::flock; use llvm; use std::path::{Path, PathBuf}; -use std::cell::{Cell, RefCell}; -use std::collections::{HashMap, HashSet}; +use std::cell::{self, Cell, RefCell}; +use std::collections::HashMap; use std::env; use std::ffi::CString; use std::rc::Rc; use std::fmt; +use std::time::Duration; use libc::c_int; pub mod config; @@ -61,6 +64,7 @@ pub struct Session { pub entry_fn: RefCell>, pub entry_type: Cell>, pub plugin_registrar_fn: Cell>, + pub derive_registrar_fn: Cell>, pub default_sysroot: Option, // The name of the root source file of the crate, in the local file system. // The path is always expected to be absolute. `None` means that there is no @@ -89,21 +93,33 @@ pub struct Session { /// The metadata::creader module may inject an allocator/panic_runtime /// dependency if it didn't already find one, and this tracks what was /// injected. - pub injected_allocator: Cell>, - pub injected_panic_runtime: Cell>, - - /// Names of all bang-style macros and syntax extensions - /// available in this crate - pub available_macros: RefCell>, + pub injected_allocator: Cell>, + pub injected_panic_runtime: Cell>, /// Map from imported macro spans (which consist of /// the localized span for the macro body) to the /// macro name and defintion span in the source crate. pub imported_macro_spans: RefCell>, + incr_comp_session: RefCell, + + /// Some measurements that are being gathered during compilation. + pub perf_stats: PerfStats, + next_node_id: Cell, } +pub struct PerfStats { + // The accumulated time needed for computing the SVH of the crate + pub svh_time: Cell, + // The accumulated time spent on computing incr. comp. hashes + pub incr_comp_hashes_time: Cell, + // The number of incr. comp. 
hash computations performed + pub incr_comp_hashes_count: Cell, + // The accumulated time spent on computing symbol hashes + pub symbol_hash_time: Cell, +} + impl Session { pub fn local_crate_disambiguator(&self) -> token::InternedString { self.crate_disambiguator.borrow().clone() @@ -250,11 +266,13 @@ impl Session { } lints.insert(id, vec!((lint_id, sp, msg))); } - pub fn reserve_node_ids(&self, count: ast::NodeId) -> ast::NodeId { + pub fn reserve_node_ids(&self, count: usize) -> ast::NodeId { let id = self.next_node_id.get(); - match id.checked_add(count) { - Some(next) => self.next_node_id.set(next), + match id.as_usize().checked_add(count) { + Some(next) => { + self.next_node_id.set(ast::NodeId::new(next)); + } None => bug!("Input too large, ran out of node ids!") } @@ -311,6 +329,12 @@ impl Session { format!("__rustc_plugin_registrar__{}_{}", svh, index.as_usize()) } + pub fn generate_derive_registrar_symbol(&self, + svh: &Svh, + index: DefIndex) -> String { + format!("__rustc_derive_registrar__{}_{}", svh, index.as_usize()) + } + pub fn sysroot<'a>(&'a self) -> &'a Path { match self.opts.maybe_sysroot { Some (ref sysroot) => sysroot, @@ -331,6 +355,87 @@ impl Session { &self.opts.search_paths, kind) } + + pub fn init_incr_comp_session(&self, + session_dir: PathBuf, + lock_file: flock::Lock) { + let mut incr_comp_session = self.incr_comp_session.borrow_mut(); + + if let IncrCompSession::NotInitialized = *incr_comp_session { } else { + bug!("Trying to initialize IncrCompSession `{:?}`", *incr_comp_session) + } + + *incr_comp_session = IncrCompSession::Active { + session_directory: session_dir, + lock_file: lock_file, + }; + } + + pub fn finalize_incr_comp_session(&self, new_directory_path: PathBuf) { + let mut incr_comp_session = self.incr_comp_session.borrow_mut(); + + if let IncrCompSession::Active { .. } = *incr_comp_session { } else { + bug!("Trying to finalize IncrCompSession `{:?}`", *incr_comp_session) + } + + // Note: This will also drop the lock file, thus unlocking the directory + *incr_comp_session = IncrCompSession::Finalized { + session_directory: new_directory_path, + }; + } + + pub fn mark_incr_comp_session_as_invalid(&self) { + let mut incr_comp_session = self.incr_comp_session.borrow_mut(); + + let session_directory = match *incr_comp_session { + IncrCompSession::Active { ref session_directory, .. } => { + session_directory.clone() + } + _ => bug!("Trying to invalidate IncrCompSession `{:?}`", + *incr_comp_session), + }; + + // Note: This will also drop the lock file, thus unlocking the directory + *incr_comp_session = IncrCompSession::InvalidBecauseOfErrors { + session_directory: session_directory + }; + } + + pub fn incr_comp_session_dir(&self) -> cell::Ref { + let incr_comp_session = self.incr_comp_session.borrow(); + cell::Ref::map(incr_comp_session, |incr_comp_session| { + match *incr_comp_session { + IncrCompSession::NotInitialized => { + bug!("Trying to get session directory from IncrCompSession `{:?}`", + *incr_comp_session) + } + IncrCompSession::Active { ref session_directory, .. 
} | + IncrCompSession::Finalized { ref session_directory } | + IncrCompSession::InvalidBecauseOfErrors { ref session_directory } => { + session_directory + } + } + }) + } + + pub fn incr_comp_session_dir_opt(&self) -> Option> { + if self.opts.incremental.is_some() { + Some(self.incr_comp_session_dir()) + } else { + None + } + } + + pub fn print_perf_stats(&self) { + println!("Total time spent computing SVHs: {}", + duration_to_secs_str(self.perf_stats.svh_time.get())); + println!("Total time spent computing incr. comp. hashes: {}", + duration_to_secs_str(self.perf_stats.incr_comp_hashes_time.get())); + println!("Total number of incr. comp. hashes computed: {}", + self.perf_stats.incr_comp_hashes_count.get()); + println!("Total time spent computing symbol hashes: {}", + duration_to_secs_str(self.perf_stats.symbol_hash_time.get())); + } } pub fn build_session(sopts: config::Options, @@ -428,6 +533,7 @@ pub fn build_session_(sopts: config::Options, entry_fn: RefCell::new(None), entry_type: Cell::new(None), plugin_registrar_fn: Cell::new(None), + derive_registrar_fn: Cell::new(None), default_sysroot: default_sysroot, local_crate_source_file: local_crate_source_file, working_dir: env::current_dir().unwrap(), @@ -441,11 +547,17 @@ pub fn build_session_(sopts: config::Options, crate_disambiguator: RefCell::new(token::intern("").as_str()), features: RefCell::new(feature_gate::Features::new()), recursion_limit: Cell::new(64), - next_node_id: Cell::new(1), + next_node_id: Cell::new(NodeId::new(1)), injected_allocator: Cell::new(None), injected_panic_runtime: Cell::new(None), - available_macros: RefCell::new(HashSet::new()), imported_macro_spans: RefCell::new(HashMap::new()), + incr_comp_session: RefCell::new(IncrCompSession::NotInitialized), + perf_stats: PerfStats { + svh_time: Cell::new(Duration::from_secs(0)), + incr_comp_hashes_time: Cell::new(Duration::from_secs(0)), + incr_comp_hashes_count: Cell::new(0), + symbol_hash_time: Cell::new(Duration::from_secs(0)), + } }; init_llvm(&sess); @@ -453,6 +565,31 @@ pub fn build_session_(sopts: config::Options, sess } +/// Holds data on the current incremental compilation session, if there is one. +#[derive(Debug)] +pub enum IncrCompSession { + // This is the state the session will be in until the incr. comp. dir is + // needed. + NotInitialized, + // This is the state during which the session directory is private and can + // be modified. + Active { + session_directory: PathBuf, + lock_file: flock::Lock, + }, + // This is the state after the session directory has been finalized. In this + // state, the contents of the directory must not be modified any more. + Finalized { + session_directory: PathBuf, + }, + // This is an error state that is reached when some compilation error has + // occurred. It indicates that the contents of the session directory must + // not be used, since they might be invalid. + InvalidBecauseOfErrors { + session_directory: PathBuf, + } +} + fn init_llvm(sess: &Session) { unsafe { // Before we touch LLVM, make sure that multithreading is enabled. 
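[Editorial note, not part of the patch: the `session/mod.rs` hunk above introduces an `IncrCompSession` state machine guarding the incremental-compilation directory. The sketch below is a minimal, self-contained model of that state machine under simplifying assumptions: the `Session` struct here is a stand-in holding only the relevant field, `panic!` replaces the compiler's `bug!` macro, and the `flock::Lock` file handle is omitted. It is meant only to illustrate the NotInitialized → Active → Finalized transition discipline visible in the diff, not to reproduce the compiler's implementation.]

```rust
use std::cell::RefCell;
use std::path::PathBuf;

// Simplified mirror of the states added by the patch. The real enum also
// carries a directory lock in the Active state and an
// InvalidBecauseOfErrors state for failed compilations.
#[derive(Debug)]
enum IncrCompSession {
    // No incremental directory has been requested yet.
    NotInitialized,
    // The session directory is private to this process and may be modified.
    Active { session_directory: PathBuf },
    // The directory has its final name; its contents must not change anymore.
    Finalized { session_directory: PathBuf },
}

// Hypothetical stand-in for the compiler's Session, holding only the field
// relevant to this example.
struct Session {
    incr_comp_session: RefCell<IncrCompSession>,
}

impl Session {
    fn init_incr_comp_session(&self, session_dir: PathBuf) {
        let mut s = self.incr_comp_session.borrow_mut();
        // Only a NotInitialized session may become Active.
        if let IncrCompSession::NotInitialized = *s {
        } else {
            panic!("trying to initialize IncrCompSession `{:?}`", *s);
        }
        *s = IncrCompSession::Active { session_directory: session_dir };
    }

    fn finalize_incr_comp_session(&self, new_directory_path: PathBuf) {
        let mut s = self.incr_comp_session.borrow_mut();
        // Only an Active session may be finalized.
        if let IncrCompSession::Active { .. } = *s {
        } else {
            panic!("trying to finalize IncrCompSession `{:?}`", *s);
        }
        *s = IncrCompSession::Finalized { session_directory: new_directory_path };
    }
}

fn main() {
    let sess = Session {
        incr_comp_session: RefCell::new(IncrCompSession::NotInitialized),
    };
    sess.init_incr_comp_session(PathBuf::from("incr-tmp"));
    sess.finalize_incr_comp_session(PathBuf::from("incr-final"));
    println!("{:?}", sess.incr_comp_session);
}
```

The design point the patch relies on is that every transition checks the current state and aborts on an unexpected one, so a misuse of the incremental directory (for example, modifying it after finalization) is caught as a compiler bug rather than silently corrupting cached data.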
diff --git a/src/librustc/traits/coherence.rs b/src/librustc/traits/coherence.rs index b38f5f96de..68c88249ec 100644 --- a/src/librustc/traits/coherence.rs +++ b/src/librustc/traits/coherence.rs @@ -12,9 +12,7 @@ use super::{SelectionContext, Obligation, ObligationCause}; -use middle::cstore::LOCAL_CRATE; -use hir::def_id::DefId; -use ty::subst::TypeSpace; +use hir::def_id::{DefId, LOCAL_CRATE}; use ty::{self, Ty, TyCtxt}; use infer::{InferCtxt, TypeOrigin}; use syntax_pos::DUMMY_SP; @@ -160,12 +158,9 @@ fn orphan_check_trait_ref<'tcx>(tcx: TyCtxt, // First, create an ordered iterator over all the type parameters to the trait, with the self // type appearing first. - let input_tys = Some(trait_ref.self_ty()); - let input_tys = input_tys.iter().chain(trait_ref.substs.types.get_slice(TypeSpace)); - // Find the first input type that either references a type parameter OR // some local type. - for input_ty in input_tys { + for input_ty in trait_ref.input_types() { if ty_is_local(tcx, input_ty, infer_is_local) { debug!("orphan_check_trait_ref: ty_is_local `{:?}`", input_ty); @@ -228,10 +223,10 @@ fn fundamental_ty(tcx: TyCtxt, ty: Ty) -> bool { match ty.sty { ty::TyBox(..) | ty::TyRef(..) => true, - ty::TyEnum(def, _) | ty::TyStruct(def, _) => + ty::TyAdt(def, _) => def.is_fundamental(), ty::TyTrait(ref data) => - tcx.has_attr(data.principal_def_id(), "fundamental"), + tcx.has_attr(data.principal.def_id(), "fundamental"), _ => false } @@ -264,8 +259,7 @@ fn ty_is_local_constructor(tcx: TyCtxt, ty: Ty, infer_is_local: InferIsLocal)-> infer_is_local.0 } - ty::TyEnum(def, _) | - ty::TyStruct(def, _) => { + ty::TyAdt(def, _) => { def.did.is_local() } @@ -275,7 +269,7 @@ fn ty_is_local_constructor(tcx: TyCtxt, ty: Ty, infer_is_local: InferIsLocal)-> } ty::TyTrait(ref tt) => { - tt.principal_def_id().is_local() + tt.principal.def_id().is_local() } ty::TyError => { diff --git a/src/librustc/traits/error_reporting.rs b/src/librustc/traits/error_reporting.rs index cf004767b2..52ddd8ab5d 100644 --- a/src/librustc/traits/error_reporting.rs +++ b/src/librustc/traits/error_reporting.rs @@ -27,37 +27,32 @@ use super::{ use fmt_macros::{Parser, Piece, Position}; use hir::def_id::DefId; use infer::{self, InferCtxt, TypeOrigin}; -use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable}; +use ty::{self, AdtKind, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable}; use ty::error::ExpectedFound; use ty::fast_reject; use ty::fold::TypeFolder; -use ty::subst::{self, Subst, TypeSpace}; +use ty::subst::Subst; use util::nodemap::{FnvHashMap, FnvHashSet}; use std::cmp; use std::fmt; -use syntax::ast; -use syntax::attr::{AttributeMethods, AttrMetaMethods}; use syntax_pos::Span; use errors::DiagnosticBuilder; #[derive(Debug, PartialEq, Eq, Hash)] pub struct TraitErrorKey<'tcx> { span: Span, - warning_node_id: Option, predicate: ty::Predicate<'tcx> } impl<'a, 'gcx, 'tcx> TraitErrorKey<'tcx> { fn from_error(infcx: &InferCtxt<'a, 'gcx, 'tcx>, - e: &FulfillmentError<'tcx>, - warning_node_id: Option) -> Self { + e: &FulfillmentError<'tcx>) -> Self { let predicate = infcx.resolve_type_vars_if_possible(&e.obligation.predicate); TraitErrorKey { span: e.obligation.cause.span, - predicate: infcx.tcx.erase_regions(&predicate), - warning_node_id: warning_node_id + predicate: infcx.tcx.erase_regions(&predicate) } } } @@ -65,22 +60,13 @@ impl<'a, 'gcx, 'tcx> TraitErrorKey<'tcx> { impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { pub fn report_fulfillment_errors(&self, errors: &Vec>) { for error in errors { - 
self.report_fulfillment_error(error, None); - } - } - - pub fn report_fulfillment_errors_as_warnings(&self, - errors: &Vec>, - node_id: ast::NodeId) { - for error in errors { - self.report_fulfillment_error(error, Some(node_id)); + self.report_fulfillment_error(error); } } fn report_fulfillment_error(&self, - error: &FulfillmentError<'tcx>, - warning_node_id: Option) { - let error_key = TraitErrorKey::from_error(self, error, warning_node_id); + error: &FulfillmentError<'tcx>) { + let error_key = TraitErrorKey::from_error(self, error); debug!("report_fulfillment_errors({:?}) - key={:?}", error, error_key); if !self.reported_trait_errors.borrow_mut().insert(error_key) { @@ -89,10 +75,10 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } match error.code { FulfillmentErrorCode::CodeSelectionError(ref e) => { - self.report_selection_error(&error.obligation, e, warning_node_id); + self.report_selection_error(&error.obligation, e); } FulfillmentErrorCode::CodeProjectionError(ref e) => { - self.report_projection_error(&error.obligation, e, warning_node_id); + self.report_projection_error(&error.obligation, e); } FulfillmentErrorCode::CodeAmbiguity => { self.maybe_report_ambiguity(&error.obligation); @@ -102,8 +88,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { fn report_projection_error(&self, obligation: &PredicateObligation<'tcx>, - error: &MismatchedProjectionTypes<'tcx>, - warning_node_id: Option) + error: &MismatchedProjectionTypes<'tcx>) { let predicate = self.resolve_type_vars_if_possible(&obligation.predicate); @@ -111,16 +96,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { if predicate.references_error() { return } - if let Some(warning_node_id) = warning_node_id { - self.tcx.sess.add_lint( - ::lint::builtin::UNSIZED_IN_TUPLE, - warning_node_id, - obligation.cause.span, - format!("type mismatch resolving `{}`: {}", - predicate, - error.err)); - return - } + self.probe(|_| { let origin = TypeOrigin::Misc(obligation.cause.span); let err_buf; @@ -161,35 +137,12 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.tcx.sess, origin.span(), E0271, "type mismatch resolving `{}`", predicate ); - self.note_type_err(&mut diag, origin, values, err); + self.note_type_err(&mut diag, origin, None, values, err); self.note_obligation_cause(&mut diag, obligation); diag.emit(); }); } - fn impl_substs(&self, - did: DefId, - obligation: PredicateObligation<'tcx>) - -> subst::Substs<'tcx> { - let tcx = self.tcx; - - let ity = tcx.lookup_item_type(did); - let (tps, rps, _) = - (ity.generics.types.get_slice(TypeSpace), - ity.generics.regions.get_slice(TypeSpace), - ity.ty); - - let rps = self.region_vars_for_defs(obligation.cause.span, rps); - let mut substs = subst::Substs::new( - subst::VecPerParamSpace::empty(), - subst::VecPerParamSpace::new(rps, Vec::new(), Vec::new())); - self.type_vars_for_defs(obligation.cause.span, - TypeSpace, - &mut substs, - tps); - substs - } - fn fuzzy_match_tys(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> bool { /// returns the fuzzy category of a given type, or None /// if the type can be equated to any type. @@ -198,30 +151,30 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { ty::TyBool => Some(0), ty::TyChar => Some(1), ty::TyStr => Some(2), - ty::TyInt(..) | ty::TyUint(..) | - ty::TyInfer(ty::IntVar(..)) => Some(3), + ty::TyInt(..) | ty::TyUint(..) | ty::TyInfer(ty::IntVar(..)) => Some(3), ty::TyFloat(..) | ty::TyInfer(ty::FloatVar(..)) => Some(4), - ty::TyEnum(..) => Some(5), - ty::TyStruct(..) => Some(6), - ty::TyBox(..) | ty::TyRef(..) | ty::TyRawPtr(..) 
=> Some(7), - ty::TyArray(..) | ty::TySlice(..) => Some(8), - ty::TyFnDef(..) | ty::TyFnPtr(..) => Some(9), - ty::TyTrait(..) => Some(10), - ty::TyClosure(..) => Some(11), - ty::TyTuple(..) => Some(12), - ty::TyProjection(..) => Some(13), - ty::TyParam(..) => Some(14), - ty::TyAnon(..) => Some(15), - ty::TyNever => Some(16), + ty::TyBox(..) | ty::TyRef(..) | ty::TyRawPtr(..) => Some(5), + ty::TyArray(..) | ty::TySlice(..) => Some(6), + ty::TyFnDef(..) | ty::TyFnPtr(..) => Some(7), + ty::TyTrait(..) => Some(8), + ty::TyClosure(..) => Some(9), + ty::TyTuple(..) => Some(10), + ty::TyProjection(..) => Some(11), + ty::TyParam(..) => Some(12), + ty::TyAnon(..) => Some(13), + ty::TyNever => Some(14), + ty::TyAdt(adt, ..) => match adt.adt_kind() { + AdtKind::Struct => Some(15), + AdtKind::Union => Some(16), + AdtKind::Enum => Some(17), + }, ty::TyInfer(..) | ty::TyError => None } } match (type_category(a), type_category(b)) { (Some(cat_a), Some(cat_b)) => match (&a.sty, &b.sty) { - (&ty::TyStruct(def_a, _), &ty::TyStruct(def_b, _)) | - (&ty::TyEnum(def_a, _), &ty::TyEnum(def_b, _)) => - def_a == def_b, + (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => def_a == def_b, _ => cat_a == cat_b }, // infer and error can be equated to all types @@ -244,18 +197,19 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.tcx.lookup_trait_def(trait_ref.def_id) .for_each_relevant_impl(self.tcx, trait_self_ty, |def_id| { + let impl_substs = self.fresh_substs_for_item(obligation.cause.span, def_id); let impl_trait_ref = tcx .impl_trait_ref(def_id) .unwrap() - .subst(tcx, &self.impl_substs(def_id, obligation.clone())); + .subst(tcx, impl_substs); let impl_self_ty = impl_trait_ref.self_ty(); if let Ok(..) = self.can_equate(&trait_self_ty, &impl_self_ty) { self_match_impls.push(def_id); - if trait_ref.substs.types.get_slice(TypeSpace).iter() - .zip(impl_trait_ref.substs.types.get_slice(TypeSpace)) + if trait_ref.substs.types().skip(1) + .zip(impl_trait_ref.substs.types().skip(1)) .all(|(u,v)| self.fuzzy_match_tys(u, v)) { fuzzy_match_impls.push(def_id); @@ -293,14 +247,10 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { let def = self.tcx.lookup_trait_def(trait_ref.def_id); let trait_str = def.trait_ref.to_string(); if let Some(ref istring) = item.value_str() { - let mut generic_map = def.generics.types.iter_enumerated() - .map(|(param, i, gen)| { - (gen.name.as_str().to_string(), - trait_ref.substs.types.get(param, i) - .to_string()) - }).collect::>(); - generic_map.insert("Self".to_string(), - trait_ref.self_ty().to_string()); + let generic_map = def.generics.types.iter().map(|param| { + (param.name.as_str().to_string(), + trait_ref.substs.type_for_def(param).to_string()) + }).collect::>(); let parser = Parser::new(&istring); let mut errored = false; let err: String = parser.filter_map(|p| { @@ -469,8 +419,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { pub fn report_selection_error(&self, obligation: &PredicateObligation<'tcx>, - error: &SelectionError<'tcx>, - warning_node_id: Option) + error: &SelectionError<'tcx>) { let span = obligation.cause.span; let mut err = match *error { @@ -493,20 +442,11 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } else { let trait_ref = trait_predicate.to_poly_trait_ref(); - if let Some(warning_node_id) = warning_node_id { - self.tcx.sess.add_lint( - ::lint::builtin::UNSIZED_IN_TUPLE, - warning_node_id, - obligation.cause.span, - format!("the trait bound `{}` is not satisfied", - trait_ref.to_predicate())); - return; - } - - let mut err = struct_span_err!( - 
self.tcx.sess, span, E0277, + let mut err = struct_span_err!(self.tcx.sess, span, E0277, "the trait bound `{}` is not satisfied", trait_ref.to_predicate()); + err.span_label(span, &format!("trait `{}` not satisfied", + trait_ref.to_predicate())); // Try to report a help message @@ -567,15 +507,9 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { ty::Predicate::ObjectSafe(trait_def_id) => { let violations = self.tcx.object_safety_violations(trait_def_id); - let err = self.tcx.report_object_safety_error(span, - trait_def_id, - warning_node_id, - violations); - if let Some(err) = err { - err - } else { - return; - } + self.tcx.report_object_safety_error(span, + trait_def_id, + violations) } ty::Predicate::ClosureKind(closure_def_id, kind) => { @@ -603,13 +537,6 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // (which may fail). span_bug!(span, "WF predicate not satisfied for {:?}", ty); } - - ty::Predicate::Rfc1592(ref data) => { - span_bug!( - obligation.cause.span, - "RFC1592 predicate not satisfied for {:?}", - data); - } } } } @@ -631,14 +558,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { TraitNotObjectSafe(did) => { let violations = self.tcx.object_safety_violations(did); - let err = self.tcx.report_object_safety_error(span, did, - warning_node_id, - violations); - if let Some(err) = err { - err - } else { - return; - } + self.tcx.report_object_safety_error(span, did, + violations) } }; self.note_obligation_cause(&mut err, obligation); @@ -666,24 +587,17 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn report_object_safety_error(self, span: Span, trait_def_id: DefId, - warning_node_id: Option, violations: Vec) - -> Option> + -> DiagnosticBuilder<'tcx> { - let mut err = match warning_node_id { - Some(_) => None, - None => { - let trait_str = self.item_path_str(trait_def_id); - let mut db = struct_span_err!( - self.sess, span, E0038, - "the trait `{}` cannot be made into an object", - trait_str); - db.span_label(span, - &format!("the trait `{}` cannot be made \ - into an object", trait_str)); - Some(db) - } - }; + let trait_str = self.item_path_str(trait_def_id); + let mut err = struct_span_err!( + self.sess, span, E0038, + "the trait `{}` cannot be made into an object", + trait_str); + err.span_label(span, &format!( + "the trait `{}` cannot be made into an object", trait_str + )); let mut reported_violations = FnvHashSet(); for violation in violations { @@ -723,19 +637,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { &buf } }; - match (warning_node_id, &mut err) { - (Some(node_id), &mut None) => { - self.sess.add_lint( - ::lint::builtin::OBJECT_UNSAFE_FRAGMENT, - node_id, - span, - note.to_string()); - } - (None, &mut Some(ref mut err)) => { - err.note(note); - } - _ => unreachable!() - } + err.note(note); } err } @@ -764,8 +666,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { ty::Predicate::Trait(ref data) => { let trait_ref = data.to_poly_trait_ref(); let self_ty = trait_ref.self_ty(); - let all_types = &trait_ref.substs().types; - if all_types.references_error() { + if predicate.references_error() { } else { // Typically, this ambiguity should only happen if // there are unresolved type inference variables diff --git a/src/librustc/traits/fulfill.rs b/src/librustc/traits/fulfill.rs index 5ba7b914d6..65860671c4 100644 --- a/src/librustc/traits/fulfill.rs +++ b/src/librustc/traits/fulfill.rs @@ -57,9 +57,6 @@ pub struct FulfillmentContext<'tcx> { // fulfillment context. predicates: ObligationForest>, - // A list of new obligations due to RFC1592. 
- rfc1592_obligations: Vec>, - // A set of constraints that regionck must validate. Each // constraint has the form `T:'a`, meaning "some type `T` must // outlive the lifetime 'a". These constraints derive from @@ -93,7 +90,7 @@ pub struct FulfillmentContext<'tcx> { #[derive(Clone)] pub struct RegionObligation<'tcx> { - pub sub_region: ty::Region, + pub sub_region: &'tcx ty::Region, pub sup_type: Ty<'tcx>, pub cause: ObligationCause<'tcx>, } @@ -142,7 +139,7 @@ impl<'a, 'gcx, 'tcx> DeferredObligation<'tcx> { // Auto trait obligations on `impl Trait`. if tcx.trait_has_default_impl(predicate.def_id()) { let substs = predicate.skip_binder().trait_ref.substs; - if substs.types.as_slice().len() == 1 && substs.regions.is_empty() { + if substs.types().count() == 1 && substs.regions().next().is_none() { if let ty::TyAnon(..) = predicate.skip_binder().self_ty().sty { return true; } @@ -160,10 +157,9 @@ impl<'a, 'gcx, 'tcx> DeferredObligation<'tcx> { // We can resolve the `impl Trait` to its concrete type. if let Some(ty_scheme) = tcx.opt_lookup_item_type(def_id) { let concrete_ty = ty_scheme.ty.subst(tcx, substs); - let concrete_substs = Substs::new_trait(vec![], vec![], concrete_ty); let predicate = ty::TraitRef { def_id: self.predicate.def_id(), - substs: tcx.mk_substs(concrete_substs) + substs: Substs::new_trait(tcx, concrete_ty, &[]) }.to_predicate(); let original_obligation = Obligation::new(self.cause.clone(), @@ -193,7 +189,6 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> { pub fn new() -> FulfillmentContext<'tcx> { FulfillmentContext { predicates: ObligationForest::new(), - rfc1592_obligations: Vec::new(), region_obligations: NodeMap(), deferred_obligations: vec![], } @@ -247,7 +242,7 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> { pub fn register_region_obligation(&mut self, t_a: Ty<'tcx>, - r_b: ty::Region, + r_b: &'tcx ty::Region, cause: ObligationCause<'tcx>) { register_region_obligation(t_a, r_b, cause, &mut self.region_obligations); @@ -276,13 +271,6 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> { }); } - pub fn register_rfc1592_obligation(&mut self, - _infcx: &InferCtxt<'a, 'gcx, 'tcx>, - obligation: PredicateObligation<'tcx>) - { - self.rfc1592_obligations.push(obligation); - } - pub fn region_obligations(&self, body_id: ast::NodeId) -> &[RegionObligation<'tcx>] @@ -293,21 +281,6 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> { } } - pub fn select_rfc1592_obligations(&mut self, - infcx: &InferCtxt<'a, 'gcx, 'tcx>) - -> Result<(),Vec>> - { - while !self.rfc1592_obligations.is_empty() { - for obligation in mem::replace(&mut self.rfc1592_obligations, Vec::new()) { - self.register_predicate_obligation(infcx, obligation); - } - - self.select_all_or_error(infcx)?; - } - - Ok(()) - } - pub fn select_all_or_error(&mut self, infcx: &InferCtxt<'a, 'gcx, 'tcx>) -> Result<(),Vec>> @@ -363,7 +336,6 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> { let outcome = self.predicates.process_obligations(&mut FulfillProcessor { selcx: selcx, region_obligations: &mut self.region_obligations, - rfc1592_obligations: &mut self.rfc1592_obligations, deferred_obligations: &mut self.deferred_obligations }); debug!("select: outcome={:?}", outcome); @@ -399,7 +371,6 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> { struct FulfillProcessor<'a, 'b: 'a, 'gcx: 'tcx, 'tcx: 'b> { selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>, region_obligations: &'a mut NodeMap>>, - rfc1592_obligations: &'a mut Vec>, deferred_obligations: &'a mut Vec> } @@ -414,7 +385,6 @@ impl<'a, 'b, 'gcx, 'tcx> ObligationProcessor for 
FulfillProcessor<'a, 'b, 'gcx, process_predicate(self.selcx, obligation, self.region_obligations, - self.rfc1592_obligations, self.deferred_obligations) .map(|os| os.map(|os| os.into_iter().map(|o| PendingPredicateObligation { obligation: o, @@ -441,8 +411,7 @@ fn trait_ref_type_vars<'a, 'gcx, 'tcx>(selcx: &mut SelectionContext<'a, 'gcx, 't { t.skip_binder() // ok b/c this check doesn't care about regions .input_types() - .iter() - .map(|t| selcx.infcx().resolve_type_vars_if_possible(t)) + .map(|t| selcx.infcx().resolve_type_vars_if_possible(&t)) .filter(|t| t.has_infer_types()) .flat_map(|t| t.walk()) .filter(|t| match t.sty { ty::TyInfer(_) => true, _ => false }) @@ -457,7 +426,6 @@ fn process_predicate<'a, 'gcx, 'tcx>( selcx: &mut SelectionContext<'a, 'gcx, 'tcx>, pending_obligation: &mut PendingPredicateObligation<'tcx>, region_obligations: &mut NodeMap>>, - rfc1592_obligations: &mut Vec>, deferred_obligations: &mut Vec>) -> Result>>, FulfillmentErrorCode<'tcx>> @@ -582,7 +550,8 @@ fn process_predicate<'a, 'gcx, 'tcx>( // Otherwise, we have something of the form // `for<'a> T: 'a where 'a not in T`, which we can treat as `T: 'static`. Some(t_a) => { - register_region_obligation(t_a, ty::ReStatic, + let r_static = selcx.tcx().mk_region(ty::ReStatic); + register_region_obligation(t_a, r_static, obligation.cause.clone(), region_obligations); Ok(Some(vec![])) @@ -645,14 +614,6 @@ fn process_predicate<'a, 'gcx, 'tcx>( s => Ok(s) } } - - ty::Predicate::Rfc1592(ref inner) => { - rfc1592_obligations.push(PredicateObligation { - predicate: ty::Predicate::clone(inner), - ..obligation.clone() - }); - Ok(Some(vec![])) - } } } @@ -692,7 +653,7 @@ fn coinductive_obligation<'a,'gcx,'tcx>(selcx: &SelectionContext<'a,'gcx,'tcx>, } fn register_region_obligation<'tcx>(t_a: Ty<'tcx>, - r_b: ty::Region, + r_b: &'tcx ty::Region, cause: ObligationCause<'tcx>, region_obligations: &mut NodeMap>>) { diff --git a/src/librustc/traits/mod.rs b/src/librustc/traits/mod.rs index dc0807ba38..7ba10d9c0a 100644 --- a/src/librustc/traits/mod.rs +++ b/src/librustc/traits/mod.rs @@ -17,7 +17,7 @@ pub use self::ObligationCauseCode::*; use hir::def_id::DefId; use middle::free_region::FreeRegionMap; -use ty::subst; +use ty::subst::Substs; use ty::{self, Ty, TyCtxt, TypeFoldable}; use infer::InferCtxt; @@ -40,7 +40,7 @@ pub use self::select::{EvaluationCache, SelectionContext, SelectionCache}; pub use self::select::{MethodMatchResult, MethodMatched, MethodAmbiguous, MethodDidNotMatch}; pub use self::select::{MethodMatchedData}; // intentionally don't export variants pub use self::specialize::{OverlapError, specialization_graph, specializes, translate_substs}; -pub use self::specialize::{SpecializesCache}; +pub use self::specialize::{SpecializesCache, find_method}; pub use self::util::elaborate_predicates; pub use self::util::supertraits; pub use self::util::Supertraits; @@ -272,7 +272,7 @@ pub enum Vtable<'tcx, N> { #[derive(Clone, PartialEq, Eq)] pub struct VtableImplData<'tcx, N> { pub impl_def_id: DefId, - pub substs: &'tcx subst::Substs<'tcx>, + pub substs: &'tcx Substs<'tcx>, pub nested: Vec } @@ -527,6 +527,88 @@ pub fn fully_normalize<'a, 'gcx, 'tcx, T>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, Ok(resolved_value) } +/// Normalizes the predicates and checks whether they hold. If this +/// returns false, then either normalize encountered an error or one +/// of the predicates did not hold. Used when creating vtables to +/// check for unsatisfiable methods. 
+pub fn normalize_and_test_predicates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + predicates: Vec>) + -> bool +{ + debug!("normalize_and_test_predicates(predicates={:?})", + predicates); + + tcx.infer_ctxt(None, None, Reveal::All).enter(|infcx| { + let mut selcx = SelectionContext::new(&infcx); + let mut fulfill_cx = FulfillmentContext::new(); + let cause = ObligationCause::dummy(); + let Normalized { value: predicates, obligations } = + normalize(&mut selcx, cause.clone(), &predicates); + for obligation in obligations { + fulfill_cx.register_predicate_obligation(&infcx, obligation); + } + for predicate in predicates { + let obligation = Obligation::new(cause.clone(), predicate); + fulfill_cx.register_predicate_obligation(&infcx, obligation); + } + + fulfill_cx.select_all_or_error(&infcx).is_ok() + }) +} + +/// Given a trait `trait_ref`, iterates the vtable entries +/// that come from `trait_ref`, including its supertraits. +#[inline] // FIXME(#35870) Avoid closures being unexported due to impl Trait. +pub fn get_vtable_methods<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + trait_ref: ty::PolyTraitRef<'tcx>) + -> impl Iterator)>> + 'a +{ + debug!("get_vtable_methods({:?})", trait_ref); + + supertraits(tcx, trait_ref).flat_map(move |trait_ref| { + tcx.populate_implementations_for_trait_if_necessary(trait_ref.def_id()); + + let trait_item_def_ids = tcx.impl_or_trait_items(trait_ref.def_id()); + let trait_methods = (0..trait_item_def_ids.len()).filter_map(move |i| { + match tcx.impl_or_trait_item(trait_item_def_ids[i]) { + ty::MethodTraitItem(m) => Some(m), + _ => None + } + }); + + // Now list each method's DefId and Substs (for within its trait). + // If the method can never be called from this object, produce None. + trait_methods.map(move |trait_method| { + debug!("get_vtable_methods: trait_method={:?}", trait_method); + + // Some methods cannot be called on an object; skip those. + if !tcx.is_vtable_safe_method(trait_ref.def_id(), &trait_method) { + debug!("get_vtable_methods: not vtable safe"); + return None; + } + + // the method may have some early-bound lifetimes, add + // regions for those + let substs = Substs::for_item(tcx, trait_method.def_id, + |_, _| tcx.mk_region(ty::ReErased), + |def, _| trait_ref.substs().type_for_def(def)); + + // It's possible that the method relies on where clauses that + // do not hold for this particular set of type parameters. + // Note that this method could then never be called, so we + // do not want to try and trans it, in that case (see #23435). 
+ let predicates = trait_method.predicates.instantiate_own(tcx, substs); + if !normalize_and_test_predicates(tcx, predicates.predicates) { + debug!("get_vtable_methods: predicates do not hold"); + return None; + } + + Some((trait_method.def_id, substs)) + }) + }) +} + impl<'tcx,O> Obligation<'tcx,O> { pub fn new(cause: ObligationCause<'tcx>, trait_ref: O) @@ -571,7 +653,7 @@ impl<'tcx> ObligationCause<'tcx> { } pub fn dummy() -> ObligationCause<'tcx> { - ObligationCause { span: DUMMY_SP, body_id: 0, code: MiscObligation } + ObligationCause { span: DUMMY_SP, body_id: ast::CRATE_NODE_ID, code: MiscObligation } } } diff --git a/src/librustc/traits/object_safety.rs b/src/librustc/traits/object_safety.rs index 4889895860..5f7b715182 100644 --- a/src/librustc/traits/object_safety.rs +++ b/src/librustc/traits/object_safety.rs @@ -20,7 +20,6 @@ use super::elaborate_predicates; use hir::def_id::DefId; -use ty::subst::{self, SelfSpace, TypeSpace}; use traits; use ty::{self, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable}; use std::rc::Rc; @@ -146,10 +145,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { match predicate { ty::Predicate::Trait(ref data) => { // In the case of a trait predicate, we can skip the "self" type. - data.0.trait_ref.substs.types.get_slice(TypeSpace) - .iter() - .cloned() - .any(|t| t.has_self_ty()) + data.skip_binder().input_types().skip(1).any(|t| t.has_self_ty()) } ty::Predicate::Projection(..) | ty::Predicate::WellFormed(..) | @@ -157,7 +153,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) | ty::Predicate::ClosureKind(..) | - ty::Predicate::Rfc1592(..) | ty::Predicate::Equate(..) => { false } @@ -166,25 +161,20 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } fn trait_has_sized_self(self, trait_def_id: DefId) -> bool { - let trait_def = self.lookup_trait_def(trait_def_id); - let trait_predicates = self.lookup_predicates(trait_def_id); - self.generics_require_sized_self(&trait_def.generics, &trait_predicates) + self.generics_require_sized_self(trait_def_id) } - fn generics_require_sized_self(self, - generics: &ty::Generics<'gcx>, - predicates: &ty::GenericPredicates<'gcx>) - -> bool - { + fn generics_require_sized_self(self, def_id: DefId) -> bool { let sized_def_id = match self.lang_items.sized_trait() { Some(def_id) => def_id, None => { return false; /* No Sized trait, can't require it! */ } }; // Search for a predicate like `Self : Sized` amongst the trait bounds. - let free_substs = self.construct_free_substs(generics, + let free_substs = self.construct_free_substs(def_id, self.region_maps.node_extent(ast::DUMMY_NODE_ID)); - let predicates = predicates.instantiate(self, &free_substs).predicates.into_vec(); + let predicates = self.lookup_predicates(def_id); + let predicates = predicates.instantiate(self, free_substs).predicates; elaborate_predicates(self, predicates) .any(|predicate| { match predicate { @@ -193,7 +183,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } ty::Predicate::Projection(..) | ty::Predicate::Trait(..) | - ty::Predicate::Rfc1592(..) | ty::Predicate::Equate(..) | ty::Predicate::RegionOutlives(..) | ty::Predicate::WellFormed(..) | @@ -214,7 +203,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { { // Any method that has a `Self : Sized` requisite is otherwise // exempt from the regulations. 
- if self.generics_require_sized_self(&method.generics, &method.predicates) { + if self.generics_require_sized_self(method.def_id) { return None; } @@ -231,7 +220,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { -> bool { // Any method that has a `Self : Sized` requisite can't be called. - if self.generics_require_sized_self(&method.generics, &method.predicates) { + if self.generics_require_sized_self(method.def_id) { return false; } @@ -274,7 +263,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } // We can't monomorphize things like `fn foo(...)`. - if !method.generics.types.is_empty_in(subst::FnSpace) { + if !method.generics.types.is_empty() { return Some(MethodViolationCode::Generic); } @@ -330,7 +319,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ty.maybe_walk(|ty| { match ty.sty { ty::TyParam(ref param_ty) => { - if param_ty.space == SelfSpace { + if param_ty.is_self() { error = true; } diff --git a/src/librustc/traits/project.rs b/src/librustc/traits/project.rs index aed4f43932..ea4fc1c25a 100644 --- a/src/librustc/traits/project.rs +++ b/src/librustc/traits/project.rs @@ -811,7 +811,7 @@ fn assemble_candidates_from_trait_def<'cx, 'gcx, 'tcx>( // If so, extract what we know from the trait and try to come up with a good answer. let trait_predicates = selcx.tcx().lookup_predicates(def_id); let bounds = trait_predicates.instantiate(selcx.tcx(), substs); - let bounds = elaborate_predicates(selcx.tcx(), bounds.predicates.into_vec()); + let bounds = elaborate_predicates(selcx.tcx(), bounds.predicates); assemble_candidates_from_predicates(selcx, obligation, obligation_trait_ref, @@ -1133,10 +1133,9 @@ fn confirm_object_candidate<'cx, 'gcx, 'tcx>( object_ty) } }; - let projection_bounds = data.projection_bounds_with_self_ty(selcx.tcx(), object_ty); - let env_predicates = projection_bounds.iter() - .map(|p| p.to_predicate()) - .collect(); + let env_predicates = data.projection_bounds.iter().map(|p| { + p.with_self_ty(selcx.tcx(), object_ty).to_predicate() + }).collect(); let env_predicate = { let env_predicates = elaborate_predicates(selcx.tcx(), env_predicates); diff --git a/src/librustc/traits/select.rs b/src/librustc/traits/select.rs index b61cb0d3ee..05b4c44580 100644 --- a/src/librustc/traits/select.rs +++ b/src/librustc/traits/select.rs @@ -36,12 +36,13 @@ use super::util; use hir::def_id::DefId; use infer; use infer::{InferCtxt, InferOk, TypeFreshener, TypeOrigin}; -use ty::subst::{Subst, Substs, TypeSpace}; +use ty::subst::{Kind, Subst, Substs}; use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable}; use traits; use ty::fast_reject; use ty::relate::TypeRelation; +use rustc_data_structures::bitvec::BitVector; use rustc_data_structures::snapshot_vec::{SnapshotVecDelegate, SnapshotVec}; use std::cell::RefCell; use std::fmt; @@ -335,7 +336,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { self.infcx.tcx } - pub fn param_env(&self) -> &'cx ty::ParameterEnvironment<'tcx> { + pub fn param_env(&self) -> &'cx ty::ParameterEnvironment<'gcx> { self.infcx.param_env() } @@ -512,8 +513,6 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } match obligation.predicate { - ty::Predicate::Rfc1592(..) => EvaluatedToOk, - ty::Predicate::Trait(ref t) => { assert!(!t.has_escaping_regions()); let obligation = obligation.with(t.clone()); @@ -643,8 +642,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // This suffices to allow chains like `FnMut` implemented in // terms of `Fn` etc, but we could probably make this more // precise still. 
- let input_types = stack.fresh_trait_ref.0.input_types(); - let unbound_input_types = input_types.iter().any(|ty| ty.is_fresh()); + let unbound_input_types = stack.fresh_trait_ref.input_types().any(|ty| ty.is_fresh()); if unbound_input_types && self.intercrate { debug!("evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous", stack.fresh_trait_ref); @@ -816,7 +814,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { fn filter_negative_impls(&self, candidate: SelectionCandidate<'tcx>) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { if let ImplCandidate(def_id) = candidate { - if self.tcx().trait_impl_polarity(def_id) == Some(hir::ImplPolarity::Negative) { + if self.tcx().trait_impl_polarity(def_id) == hir::ImplPolarity::Negative { return Err(Unimplemented) } } @@ -1063,9 +1061,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { match *candidate { Ok(Some(_)) | Err(_) => true, - Ok(None) => { - cache_fresh_trait_pred.0.trait_ref.substs.types.has_infer_types() - } + Ok(None) => cache_fresh_trait_pred.has_infer_types() } } @@ -1214,7 +1210,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { bounds); let matching_bound = - util::elaborate_predicates(self.tcx(), bounds.predicates.into_vec()) + util::elaborate_predicates(self.tcx(), bounds.predicates) .filter_to_traits() .find( |bound| self.probe( @@ -1249,7 +1245,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { obligation: &TraitObligation<'tcx>, trait_bound: ty::PolyTraitRef<'tcx>, skol_trait_ref: ty::TraitRef<'tcx>, - skol_map: &infer::SkolemizationMap, + skol_map: &infer::SkolemizationMap<'tcx>, snapshot: &infer::CombinedSnapshot) -> bool { @@ -1383,7 +1379,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } // provide an impl, but only for suitable `fn` pointers - ty::TyFnDef(_, _, &ty::BareFnTy { + ty::TyFnDef(.., &ty::BareFnTy { unsafety: hir::Unsafety::Normal, abi: Abi::Rust, sig: ty::Binder(ty::FnSig { @@ -1528,7 +1524,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ty::TyTrait(ref data) => { match this.tcx().lang_items.to_builtin_kind(obligation.predicate.def_id()) { Some(bound @ ty::BoundSend) | Some(bound @ ty::BoundSync) => { - if data.bounds.builtin_bounds.contains(&bound) { + if data.builtin_bounds.contains(&bound) { debug!("assemble_candidates_from_object_ty: matched builtin bound, \ pushing candidate"); candidates.vec.push(BuiltinObjectCandidate); @@ -1538,7 +1534,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { _ => {} } - data.principal_trait_ref_with_self_ty(this.tcx(), self_ty) + data.principal.with_self_ty(this.tcx(), self_ty) } ty::TyInfer(ty::TyVar(_)) => { debug!("assemble_candidates_from_object_ty: ambiguous"); @@ -1602,7 +1598,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { return; } }; - let target = obligation.predicate.0.input_types()[0]; + let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1); debug!("assemble_candidates_for_unsizing(source={:?}, target={:?})", source, target); @@ -1622,7 +1618,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // We always upcast when we can because of reason // #2 (region bounds). data_a.principal.def_id() == data_a.principal.def_id() && - data_a.bounds.builtin_bounds.is_superset(&data_b.bounds.builtin_bounds) + data_a.builtin_bounds.is_superset(&data_b.builtin_bounds) } // T -> Trait. @@ -1639,10 +1635,10 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } // [T; n] -> [T]. 
- (&ty::TyArray(_, _), &ty::TySlice(_)) => true, + (&ty::TyArray(..), &ty::TySlice(_)) => true, // Struct -> Struct. - (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => { + (&ty::TyAdt(def_id_a, _), &ty::TyAdt(def_id_b, _)) if def_id_a.is_struct() => { def_id_a == def_id_b } @@ -1781,11 +1777,10 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ty::TyStr | ty::TySlice(_) | ty::TyTrait(..) => Never, ty::TyTuple(tys) => { - // FIXME(#33242) we only need to constrain the last field - Where(ty::Binder(tys.to_vec())) + Where(ty::Binder(tys.last().into_iter().cloned().collect())) } - ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => { + ty::TyAdt(def, substs) => { let sized_crit = def.sized_constraint(self.tcx()); // (*) binder moved here Where(ty::Binder(match sized_crit.sty { @@ -1841,8 +1836,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { Where(ty::Binder(tys.to_vec())) } - ty::TyStruct(..) | ty::TyEnum(..) | - ty::TyProjection(..) | ty::TyParam(..) | ty::TyAnon(..) => { + ty::TyAdt(..) | ty::TyProjection(..) | ty::TyParam(..) | ty::TyAnon(..) => { // Fallback to whatever user-defined impls exist in this case. None } @@ -1934,11 +1928,11 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } // for `PhantomData`, we pass `T` - ty::TyStruct(def, substs) if def.is_phantom_data() => { - substs.types.get_slice(TypeSpace).to_vec() + ty::TyAdt(def, substs) if def.is_phantom_data() => { + substs.types().collect() } - ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => { + ty::TyAdt(def, substs) => { def.all_fields() .map(|f| f.ty(self.tcx(), substs)) .collect() @@ -1984,7 +1978,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { trait_def_id, recursion_depth, normalized_ty, - vec![]); + &[]); obligations.push(skol_obligation); this.infcx().plug_leaks(skol_map, snapshot, &obligations) }) @@ -2179,13 +2173,11 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { match self_ty.sty { ty::TyTrait(ref data) => { // OK to skip the binder, it is reintroduced below - let input_types = data.principal.skip_binder().substs.types.get_slice(TypeSpace); - let assoc_types = data.bounds.projection_bounds - .iter() - .map(|pb| pb.skip_binder().ty); - let all_types: Vec<_> = input_types.iter().cloned() - .chain(assoc_types) - .collect(); + let input_types = data.principal.input_types(); + let assoc_types = data.projection_bounds.iter() + .map(|pb| pb.skip_binder().ty); + let all_types: Vec<_> = input_types.chain(assoc_types) + .collect(); // reintroduce the two binding levels we skipped, then flatten into one let all_types = ty::Binder(ty::Binder(all_types)); @@ -2267,7 +2259,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { mut substs: Normalized<'tcx, &'tcx Substs<'tcx>>, cause: ObligationCause<'tcx>, recursion_depth: usize, - skol_map: infer::SkolemizationMap, + skol_map: infer::SkolemizationMap<'tcx>, snapshot: &infer::CombinedSnapshot) -> VtableImplData<'tcx, PredicateObligation<'tcx>> { @@ -2315,7 +2307,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()); let poly_trait_ref = match self_ty.sty { ty::TyTrait(ref data) => { - data.principal_trait_ref_with_self_ty(self.tcx(), self_ty) + data.principal.with_self_ty(self.tcx(), self_ty) } _ => { span_bug!(obligation.cause.span, @@ -2476,7 +2468,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // regions here. See the comment there for more details. 
let source = self.infcx.shallow_resolve( tcx.no_late_bound_regions(&obligation.self_ty()).unwrap()); - let target = self.infcx.shallow_resolve(obligation.predicate.0.input_types()[0]); + let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1); + let target = self.infcx.shallow_resolve(target); debug!("confirm_builtin_unsize_candidate(source={:?}, target={:?})", source, target); @@ -2486,13 +2479,12 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // Trait+Kx+'a -> Trait+Ky+'b (upcasts). (&ty::TyTrait(ref data_a), &ty::TyTrait(ref data_b)) => { // See assemble_candidates_for_unsizing for more info. - let bounds = ty::ExistentialBounds { - region_bound: data_b.bounds.region_bound, - builtin_bounds: data_b.bounds.builtin_bounds, - projection_bounds: data_a.bounds.projection_bounds.clone(), - }; - - let new_trait = tcx.mk_trait(data_a.principal.clone(), bounds); + let new_trait = tcx.mk_trait(ty::TraitObject { + principal: data_a.principal, + region_bound: data_b.region_bound, + builtin_bounds: data_b.builtin_bounds, + projection_bounds: data_a.projection_bounds.clone(), + }); let origin = TypeOrigin::Misc(obligation.cause.span); let InferOk { obligations, .. } = self.infcx.sub_types(false, origin, new_trait, target) @@ -2503,8 +2495,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let cause = ObligationCause::new(obligation.cause.span, obligation.cause.body_id, ObjectCastObligation(target)); - let outlives = ty::OutlivesPredicate(data_a.bounds.region_bound, - data_b.bounds.region_bound); + let outlives = ty::OutlivesPredicate(data_a.region_bound, + data_b.region_bound); nested.push(Obligation::with_depth(cause, obligation.recursion_depth + 1, ty::Binder(outlives).to_predicate())); @@ -2512,12 +2504,11 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // T -> Trait. (_, &ty::TyTrait(ref data)) => { - let mut object_dids = Some(data.principal_def_id()).into_iter(); - // FIXME(#33243) -// data.bounds.builtin_bounds.iter().flat_map(|bound| { -// tcx.lang_items.from_builtin_kind(bound).ok() -// }) -// .chain(Some(data.principal_def_id())); + let mut object_dids = + data.builtin_bounds.iter().flat_map(|bound| { + tcx.lang_items.from_builtin_kind(bound).ok() + }) + .chain(Some(data.principal.def_id())); if let Some(did) = object_dids.find(|did| { !tcx.is_object_safe(*did) }) { @@ -2534,10 +2525,10 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { }; // Create the obligation for casting from T to Trait. - push(data.principal_trait_ref_with_self_ty(tcx, source).to_predicate()); + push(data.principal.with_self_ty(tcx, source).to_predicate()); // We can only make objects from sized types. - let mut builtin_bounds = data.bounds.builtin_bounds; + let mut builtin_bounds = data.builtin_bounds; builtin_bounds.insert(ty::BoundSized); // Create additional obligations for all the various builtin @@ -2553,14 +2544,13 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } // Create obligations for the projection predicates. 
- for bound in data.projection_bounds_with_self_ty(tcx, source) { - push(bound.to_predicate()); + for bound in &data.projection_bounds { + push(bound.with_self_ty(tcx, source).to_predicate()); } // If the type is `Foo+'a`, ensures that the type // being cast to `Foo+'a` outlives `'a`: - let outlives = ty::OutlivesPredicate(source, - data.bounds.region_bound); + let outlives = ty::OutlivesPredicate(source, data.region_bound); push(ty::Binder(outlives).to_predicate()); } @@ -2574,7 +2564,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } // Struct -> Struct. - (&ty::TyStruct(def, substs_a), &ty::TyStruct(_, substs_b)) => { + (&ty::TyAdt(def, substs_a), &ty::TyAdt(_, substs_b)) => { let fields = def .all_fields() .map(|f| f.unsubst_ty()) @@ -2586,17 +2576,15 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } else { return Err(Unimplemented); }; - let mut ty_params = vec![]; + let mut ty_params = BitVector::new(substs_a.types().count()); + let mut found = false; for ty in field.walk() { if let ty::TyParam(p) = ty.sty { - assert!(p.space == TypeSpace); - let idx = p.idx as usize; - if !ty_params.contains(&idx) { - ty_params.push(idx); - } + ty_params.insert(p.idx as usize); + found = true; } } - if ty_params.is_empty() { + if !found { return Err(Unimplemented); } @@ -2604,12 +2592,16 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // TyError and ensure they do not affect any other fields. // This could be checked after type collection for any struct // with a potentially unsized trailing field. - let mut new_substs = substs_a.clone(); - for &i in &ty_params { - new_substs.types.get_mut_slice(TypeSpace)[i] = tcx.types.err; - } + let params = substs_a.params().iter().enumerate().map(|(i, &k)| { + if ty_params.contains(i) { + Kind::from(tcx.types.err) + } else { + k + } + }); + let substs = Substs::new(tcx, params); for &ty in fields.split_last().unwrap().1 { - if ty.subst(tcx, &new_substs).references_error() { + if ty.subst(tcx, substs).references_error() { return Err(Unimplemented); } } @@ -2620,11 +2612,14 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // Check that the source structure with the target's // type parameters is a subtype of the target. - for &i in &ty_params { - let param_b = *substs_b.types.get(TypeSpace, i); - new_substs.types.get_mut_slice(TypeSpace)[i] = param_b; - } - let new_struct = tcx.mk_struct(def, tcx.mk_substs(new_substs)); + let params = substs_a.params().iter().enumerate().map(|(i, &k)| { + if ty_params.contains(i) { + Kind::from(substs_b.type_at(i)) + } else { + k + } + }); + let new_struct = tcx.mk_adt(def, Substs::new(tcx, params)); let origin = TypeOrigin::Misc(obligation.cause.span); let InferOk { obligations, .. 
} = self.infcx.sub_types(false, origin, new_struct, target) @@ -2637,7 +2632,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { obligation.predicate.def_id(), obligation.recursion_depth + 1, inner_source, - vec![inner_target])); + &[inner_target])); } _ => bug!() @@ -2660,7 +2655,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { impl_def_id: DefId, obligation: &TraitObligation<'tcx>, snapshot: &infer::CombinedSnapshot) - -> (Normalized<'tcx, &'tcx Substs<'tcx>>, infer::SkolemizationMap) + -> (Normalized<'tcx, &'tcx Substs<'tcx>>, + infer::SkolemizationMap<'tcx>) { match self.match_impl(impl_def_id, obligation, snapshot) { Ok((substs, skol_map)) => (substs, skol_map), @@ -2677,7 +2673,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { obligation: &TraitObligation<'tcx>, snapshot: &infer::CombinedSnapshot) -> Result<(Normalized<'tcx, &'tcx Substs<'tcx>>, - infer::SkolemizationMap), ()> + infer::SkolemizationMap<'tcx>), ()> { let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap(); @@ -2693,12 +2689,11 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { snapshot); let skol_obligation_trait_ref = skol_obligation.trait_ref; - let impl_substs = util::fresh_type_vars_for_impl(self.infcx, - obligation.cause.span, - impl_def_id); + let impl_substs = self.infcx.fresh_substs_for_item(obligation.cause.span, + impl_def_id); let impl_trait_ref = impl_trait_ref.subst(self.tcx(), - &impl_substs); + impl_substs); let impl_trait_ref = project::normalize_with_depth(self, @@ -2749,9 +2744,9 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // substitution if we find that any of the input types, when // simplified, do not match. - obligation.predicate.0.input_types().iter() + obligation.predicate.skip_binder().input_types() .zip(impl_trait_ref.input_types()) - .any(|(&obligation_ty, &impl_ty)| { + .any(|(obligation_ty, impl_ty)| { let simplified_obligation_ty = fast_reject::simplify_type(self.tcx(), obligation_ty, true); let simplified_impl_ty = @@ -2871,7 +2866,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { recursion_depth: usize, def_id: DefId, // of impl or trait substs: &Substs<'tcx>, // for impl or trait - skol_map: infer::SkolemizationMap, + skol_map: infer::SkolemizationMap<'tcx>, snapshot: &infer::CombinedSnapshot) -> Vec> { @@ -2892,20 +2887,18 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // obligation will normalize to `<$0 as Iterator>::Item = $1` and // `$1: Copy`, so we must ensure the obligations are emitted in // that order. 
- let predicates = tcx - .lookup_predicates(def_id) - .predicates.iter() - .flat_map(|predicate| { - let predicate = - normalize_with_depth(self, cause.clone(), recursion_depth, - &predicate.subst(tcx, substs)); - predicate.obligations.into_iter().chain( - Some(Obligation { - cause: cause.clone(), - recursion_depth: recursion_depth, - predicate: predicate.value - })) - }).collect(); + let predicates = tcx.lookup_predicates(def_id); + assert_eq!(predicates.parent, None); + let predicates = predicates.predicates.iter().flat_map(|predicate| { + let predicate = normalize_with_depth(self, cause.clone(), recursion_depth, + &predicate.subst(tcx, substs)); + predicate.obligations.into_iter().chain( + Some(Obligation { + cause: cause.clone(), + recursion_depth: recursion_depth, + predicate: predicate.value + })) + }).collect(); self.infcx().plug_leaks(skol_map, snapshot, &predicates) } } diff --git a/src/librustc/traits/specialize/mod.rs b/src/librustc/traits/specialize/mod.rs index 38cccb9753..e281a4a99b 100644 --- a/src/librustc/traits/specialize/mod.rs +++ b/src/librustc/traits/specialize/mod.rs @@ -18,7 +18,7 @@ // fits together with the rest of the trait machinery. use super::{SelectionContext, FulfillmentContext}; -use super::util::{fresh_type_vars_for_impl, impl_trait_ref_and_oblig}; +use super::util::impl_trait_ref_and_oblig; use rustc_data_structures::fnv::FnvHashMap; use hir::def_id::DefId; @@ -26,9 +26,11 @@ use infer::{InferCtxt, TypeOrigin}; use middle::region; use ty::subst::{Subst, Substs}; use traits::{self, Reveal, ObligationCause, Normalized}; -use ty::{self, TyCtxt}; +use ty::{self, TyCtxt, TypeFoldable}; use syntax_pos::DUMMY_SP; +use syntax::ast; + pub mod specialization_graph; /// Information pertinent to an overlapping impl error. @@ -44,11 +46,10 @@ pub struct OverlapError { /// When we have selected one impl, but are actually using item definitions from /// a parent impl providing a default, we need a way to translate between the /// type parameters of the two impls. Here the `source_impl` is the one we've -/// selected, and `source_substs` is a substitution of its generics (and -/// possibly some relevant `FnSpace` variables as well). And `target_node` is -/// the impl/trait we're actually going to get the definition from. The resulting -/// substitution will map from `target_node`'s generics to `source_impl`'s -/// generics as instantiated by `source_subst`. +/// selected, and `source_substs` is a substitution of its generics. +/// And `target_node` is the impl/trait we're actually going to get the +/// definition from. The resulting substitution will map from `target_node`'s +/// generics to `source_impl`'s generics as instantiated by `source_subst`. /// /// For example, consider the following scenario: /// @@ -101,7 +102,42 @@ pub fn translate_substs<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, }; // directly inherent the method generics, since those do not vary across impls - infcx.tcx.mk_substs(target_substs.with_method_from_subst(source_substs)) + source_substs.rebase_onto(infcx.tcx, source_impl, target_substs) +} + +/// Given a selected impl described by `impl_data`, returns the +/// definition and substitions for the method with the name `name`, +/// and trait method substitutions `substs`, in that impl, a less +/// specialized impl, or the trait default, whichever applies. 
+pub fn find_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + name: ast::Name, + substs: &'tcx Substs<'tcx>, + impl_data: &super::VtableImplData<'tcx, ()>) + -> (DefId, &'tcx Substs<'tcx>) +{ + assert!(!substs.needs_infer()); + + let trait_def_id = tcx.trait_id_of_impl(impl_data.impl_def_id).unwrap(); + let trait_def = tcx.lookup_trait_def(trait_def_id); + + match trait_def.ancestors(impl_data.impl_def_id).fn_defs(tcx, name).next() { + Some(node_item) => { + let substs = tcx.infer_ctxt(None, None, Reveal::All).enter(|infcx| { + let substs = substs.rebase_onto(tcx, trait_def_id, impl_data.substs); + let substs = translate_substs(&infcx, impl_data.impl_def_id, + substs, node_item.node); + tcx.lift(&substs).unwrap_or_else(|| { + bug!("find_method: translate_substs \ + returned {:?} which contains inference types/regions", + substs); + }) + }); + (node_item.item.def_id, substs) + } + None => { + bug!("method {:?} not found in {:?}", name, impl_data.impl_def_id) + } + } } /// Is impl1 a specialization of impl2? @@ -112,6 +148,8 @@ pub fn translate_substs<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, pub fn specializes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl1_def_id: DefId, impl2_def_id: DefId) -> bool { + debug!("specializes({:?}, {:?})", impl1_def_id, impl2_def_id); + if let Some(r) = tcx.specializes_cache.borrow().check(impl1_def_id, impl2_def_id) { return r; } @@ -141,24 +179,23 @@ pub fn specializes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } // create a parameter environment corresponding to a (skolemized) instantiation of impl1 - let scheme = tcx.lookup_item_type(impl1_def_id); - let predicates = tcx.lookup_predicates(impl1_def_id); - let mut penv = tcx.construct_parameter_environment(DUMMY_SP, - &scheme.generics, - &predicates, - region::DUMMY_CODE_EXTENT); + let penv = tcx.construct_parameter_environment(DUMMY_SP, + impl1_def_id, + region::DUMMY_CODE_EXTENT); let impl1_trait_ref = tcx.impl_trait_ref(impl1_def_id) .unwrap() .subst(tcx, &penv.free_substs); - let result = tcx.normalizing_infer_ctxt(Reveal::ExactMatch).enter(|mut infcx| { + // Create a infcx, taking the predicates of impl1 as assumptions: + let result = tcx.infer_ctxt(None, Some(penv), Reveal::ExactMatch).enter(|mut infcx| { // Normalize the trait reference, adding any obligations // that arise into the impl1 assumptions. let Normalized { value: impl1_trait_ref, obligations: normalization_obligations } = { let selcx = &mut SelectionContext::new(&infcx); traits::normalize(selcx, ObligationCause::dummy(), &impl1_trait_ref) }; - penv.caller_bounds.extend(normalization_obligations.into_iter().map(|o| { + infcx.parameter_environment.caller_bounds + .extend(normalization_obligations.into_iter().map(|o| { match tcx.lift_to_global(&o.predicate) { Some(predicate) => predicate, None => { @@ -167,9 +204,6 @@ pub fn specializes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } })); - // Install the parameter environment, taking the predicates of impl1 as assumptions: - infcx.parameter_environment = penv; - // Attempt to prove that impl2 applies, given all of the above. 
fulfill_implication(&infcx, impl1_trait_ref, impl2_def_id).is_ok() }); @@ -188,10 +222,10 @@ fn fulfill_implication<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, target_impl: DefId) -> Result<&'tcx Substs<'tcx>, ()> { let selcx = &mut SelectionContext::new(&infcx); - let target_substs = fresh_type_vars_for_impl(&infcx, DUMMY_SP, target_impl); + let target_substs = infcx.fresh_substs_for_item(DUMMY_SP, target_impl); let (target_trait_ref, obligations) = impl_trait_ref_and_oblig(selcx, target_impl, - &target_substs); + target_substs); // do the impls unify? If not, no specialization. if let Err(_) = infcx.eq_trait_refs(true, @@ -207,29 +241,34 @@ fn fulfill_implication<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, // attempt to prove all of the predicates for impl2 given those for impl1 // (which are packed up in penv) - let mut fulfill_cx = FulfillmentContext::new(); - for oblig in obligations.into_iter() { - fulfill_cx.register_predicate_obligation(&infcx, oblig); - } + infcx.save_and_restore_obligations_in_snapshot_flag(|infcx| { + let mut fulfill_cx = FulfillmentContext::new(); + for oblig in obligations.into_iter() { + fulfill_cx.register_predicate_obligation(&infcx, oblig); + } + match fulfill_cx.select_all_or_error(infcx) { + Err(errors) => { + // no dice! + debug!("fulfill_implication: for impls on {:?} and {:?}, \ + could not fulfill: {:?} given {:?}", + source_trait_ref, + target_trait_ref, + errors, + infcx.parameter_environment.caller_bounds); + Err(()) + } - if let Err(errors) = infcx.drain_fulfillment_cx(&mut fulfill_cx, &()) { - // no dice! - debug!("fulfill_implication: for impls on {:?} and {:?}, could not fulfill: {:?} given \ - {:?}", - source_trait_ref, - target_trait_ref, - errors, - infcx.parameter_environment.caller_bounds); - Err(()) - } else { - debug!("fulfill_implication: an impl for {:?} specializes {:?}", - source_trait_ref, - target_trait_ref); + Ok(()) => { + debug!("fulfill_implication: an impl for {:?} specializes {:?}", + source_trait_ref, + target_trait_ref); - // Now resolve the *substitution* we built for the target earlier, replacing - // the inference variables inside with whatever we got from fulfillment. - Ok(infcx.resolve_type_vars_if_possible(&target_substs)) - } + // Now resolve the *substitution* we built for the target earlier, replacing + // the inference variables inside with whatever we got from fulfillment. + Ok(infcx.resolve_type_vars_if_possible(&target_substs)) + } + } + }) } pub struct SpecializesCache { diff --git a/src/librustc/traits/specialize/specialization_graph.rs b/src/librustc/traits/specialize/specialization_graph.rs index a47cd23c64..1374719ef4 100644 --- a/src/librustc/traits/specialize/specialization_graph.rs +++ b/src/librustc/traits/specialize/specialization_graph.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::cell; use std::rc::Rc; use super::{OverlapError, specializes}; @@ -122,19 +121,18 @@ impl<'a, 'gcx, 'tcx> Children { if le == ge { // overlap, but no specialization; error out let trait_ref = impl_header.trait_ref.unwrap(); + let self_ty = trait_ref.self_ty(); Err(OverlapError { with_impl: possible_sibling, trait_desc: trait_ref.to_string(), - self_desc: trait_ref.substs.self_ty().and_then(|ty| { - // only report the Self type if it has at least - // some outer concrete shell; otherwise, it's - // not adding much information. 
- if ty.has_concrete_skeleton() { - Some(ty.to_string()) - } else { - None - } - }) + // only report the Self type if it has at least + // some outer concrete shell; otherwise, it's + // not adding much information. + self_desc: if self_ty.has_concrete_skeleton() { + Some(self_ty.to_string()) + } else { + None + } }) } else { Ok((le, ge)) @@ -288,21 +286,10 @@ impl<'a, 'gcx, 'tcx> Node { /// Iterate over the items defined directly by the given (impl or trait) node. pub fn items(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> NodeItems<'a, 'gcx> { - match *self { - Node::Impl(impl_def_id) => { - NodeItems::Impl { - tcx: tcx.global_tcx(), - items: cell::Ref::map(tcx.impl_items.borrow(), - |impl_items| &impl_items[&impl_def_id]), - idx: 0, - } - } - Node::Trait(trait_def_id) => { - NodeItems::Trait { - items: tcx.trait_items(trait_def_id).clone(), - idx: 0, - } - } + NodeItems { + tcx: tcx.global_tcx(), + items: tcx.impl_or_trait_items(self.def_id()), + idx: 0, } } @@ -315,42 +302,23 @@ impl<'a, 'gcx, 'tcx> Node { } /// An iterator over the items defined within a trait or impl. -pub enum NodeItems<'a, 'tcx: 'a> { - Impl { - tcx: TyCtxt<'a, 'tcx, 'tcx>, - items: cell::Ref<'a, Vec>, - idx: usize, - }, - Trait { - items: Rc>>, - idx: usize, - }, +pub struct NodeItems<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + items: Rc>, + idx: usize } impl<'a, 'tcx> Iterator for NodeItems<'a, 'tcx> { type Item = ImplOrTraitItem<'tcx>; fn next(&mut self) -> Option> { - match *self { - NodeItems::Impl { tcx, ref items, ref mut idx } => { - let items_table = tcx.impl_or_trait_items.borrow(); - if *idx < items.len() { - let item_def_id = items[*idx].def_id(); - let item = items_table[&item_def_id].clone(); - *idx += 1; - Some(item) - } else { - None - } - } - NodeItems::Trait { ref items, ref mut idx } => { - if *idx < items.len() { - let item = items[*idx].clone(); - *idx += 1; - Some(item) - } else { - None - } - } + if self.idx < self.items.len() { + let item_def_id = self.items[self.idx]; + let items_table = self.tcx.impl_or_trait_items.borrow(); + let item = items_table[&item_def_id].clone(); + self.idx += 1; + Some(item) + } else { + None } } } diff --git a/src/librustc/traits/util.rs b/src/librustc/traits/util.rs index 818eb4eb2f..2cefc2ad79 100644 --- a/src/librustc/traits/util.rs +++ b/src/librustc/traits/util.rs @@ -9,10 +9,8 @@ // except according to those terms. use hir::def_id::DefId; -use infer::InferCtxt; use ty::subst::{Subst, Substs}; use ty::{self, Ty, TyCtxt, ToPredicate, ToPolyTraitRef}; -use syntax_pos::Span; use util::common::ErrorReported; use util::nodemap::FnvHashSet; @@ -25,9 +23,6 @@ fn anonymize_predicate<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty::Predicate::Trait(ref data) => ty::Predicate::Trait(tcx.anonymize_late_bound_regions(data)), - ty::Predicate::Rfc1592(ref data) => - ty::Predicate::Rfc1592(Box::new(anonymize_predicate(tcx, data))), - ty::Predicate::Equate(ref data) => ty::Predicate::Equate(tcx.anonymize_late_bound_regions(data)), @@ -152,9 +147,6 @@ impl<'cx, 'gcx, 'tcx> Elaborator<'cx, 'gcx, 'tcx> { self.stack.extend(predicates); } - ty::Predicate::Rfc1592(..) => { - // Nothing to elaborate. - } ty::Predicate::WellFormed(..) => { // Currently, we do not elaborate WF predicates, // although we easily could. 
@@ -349,20 +341,6 @@ pub fn impl_trait_ref_and_oblig<'a, 'gcx, 'tcx>(selcx: &mut SelectionContext<'a, (impl_trait_ref, impl_obligations) } -// determine the `self` type, using fresh variables for all variables -// declared on the impl declaration e.g., `impl for Box<[(A,B)]>` -// would return ($0, $1) where $0 and $1 are freshly instantiated type -// variables. -pub fn fresh_type_vars_for_impl<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, - span: Span, - impl_def_id: DefId) - -> &'tcx Substs<'tcx> -{ - let tcx = infcx.tcx; - let impl_generics = tcx.lookup_item_type(impl_def_id).generics; - infcx.fresh_substs_for_generics(span, &impl_generics) -} - /// See `super::obligations_for_generics` pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>, recursion_depth: usize, @@ -402,7 +380,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { Ok(def_id) => { Ok(ty::TraitRef { def_id: def_id, - substs: self.mk_substs(Substs::empty().with_self_ty(param_ty)) + substs: Substs::new_trait(self, param_ty, &[]) }) } Err(e) => { @@ -417,12 +395,12 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { trait_def_id: DefId, recursion_depth: usize, param_ty: Ty<'tcx>, - ty_params: Vec>) + ty_params: &[Ty<'tcx>]) -> PredicateObligation<'tcx> { let trait_ref = ty::TraitRef { def_id: trait_def_id, - substs: self.mk_substs(Substs::new_trait(ty_params, vec![], param_ty)) + substs: Substs::new_trait(self, param_ty, ty_params) }; predicate_for_trait_ref(cause, trait_ref, recursion_depth) } @@ -510,10 +488,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { TupleArgumentsFlag::No => sig.0.inputs[0], TupleArgumentsFlag::Yes => self.mk_tup(sig.0.inputs.to_vec()), }; - let trait_substs = Substs::new_trait(vec![arguments_tuple], vec![], self_ty); let trait_ref = ty::TraitRef { def_id: fn_trait_def_id, - substs: self.mk_substs(trait_substs), + substs: Substs::new_trait(self, self_ty, &[arguments_tuple]), }; ty::Binder((trait_ref, sig.0.output)) } diff --git a/src/librustc/ty/_match.rs b/src/librustc/ty/_match.rs index 39dba57c47..b1846e0394 100644 --- a/src/librustc/ty/_match.rs +++ b/src/librustc/ty/_match.rs @@ -52,7 +52,8 @@ impl<'a, 'gcx, 'tcx> TypeRelation<'a, 'gcx, 'tcx> for Match<'a, 'gcx, 'tcx> { self.relate(a, b) } - fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> { + fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region> { debug!("{}.regions({:?}, {:?})", self.tag(), a, diff --git a/src/librustc/ty/adjustment.rs b/src/librustc/ty/adjustment.rs index ae9fd5ab5b..cfe370343a 100644 --- a/src/librustc/ty/adjustment.rs +++ b/src/librustc/ty/adjustment.rs @@ -19,7 +19,7 @@ use syntax_pos::Span; use hir; -#[derive(Copy, Clone)] +#[derive(Copy, Clone, RustcEncodable, RustcDecodable)] pub enum AutoAdjustment<'tcx> { AdjustNeverToAny(Ty<'tcx>), // go from ! to any type AdjustReifyFnPointer, // go from a fn-item type to a fn-pointer type @@ -90,7 +90,7 @@ pub enum AutoAdjustment<'tcx> { /// unsize: Some(Box<[i32]>), /// } /// ``` -#[derive(Copy, Clone)] +#[derive(Copy, Clone, RustcEncodable, RustcDecodable)] pub struct AutoDerefRef<'tcx> { /// Step 1. Apply a number of dereferences, producing an lvalue. pub autoderefs: usize, @@ -122,7 +122,7 @@ impl<'tcx> AutoDerefRef<'tcx> { } -#[derive(Copy, Clone, PartialEq, Debug)] +#[derive(Copy, Clone, PartialEq, Debug, RustcEncodable, RustcDecodable)] pub enum AutoRef<'tcx> { /// Convert from T to &T. 
AutoPtr(&'tcx ty::Region, hir::Mutability), @@ -160,7 +160,7 @@ impl<'a, 'gcx, 'tcx> ty::TyS<'tcx> { AdjustReifyFnPointer => { match self.sty { - ty::TyFnDef(_, _, f) => tcx.mk_fn_ptr(f), + ty::TyFnDef(.., f) => tcx.mk_fn_ptr(f), _ => { bug!("AdjustReifyFnPointer adjustment on non-fn-item: {:?}", self); diff --git a/src/librustc/ty/cast.rs b/src/librustc/ty/cast.rs index c8d282d18a..0badb85e9e 100644 --- a/src/librustc/ty/cast.rs +++ b/src/librustc/ty/cast.rs @@ -65,7 +65,7 @@ impl<'tcx> CastTy<'tcx> { ty::TyInt(_) => Some(CastTy::Int(IntTy::I)), ty::TyUint(u) => Some(CastTy::Int(IntTy::U(u))), ty::TyFloat(_) => Some(CastTy::Float), - ty::TyEnum(d,_) if d.is_payloadfree() => + ty::TyAdt(d,_) if d.is_enum() && d.is_payloadfree() => Some(CastTy::Int(IntTy::CEnum)), ty::TyRawPtr(ref mt) => Some(CastTy::Ptr(mt)), ty::TyRef(_, ref mt) => Some(CastTy::RPtr(mt)), diff --git a/src/librustc/ty/contents.rs b/src/librustc/ty/contents.rs index 53bf046d6b..b499e1346e 100644 --- a/src/librustc/ty/contents.rs +++ b/src/librustc/ty/contents.rs @@ -202,7 +202,7 @@ impl<'a, 'tcx> ty::TyS<'tcx> { TC::None } - ty::TyRef(_, _) => { + ty::TyRef(..) => { TC::None } @@ -224,7 +224,7 @@ impl<'a, 'tcx> ty::TyS<'tcx> { |ty| tc_ty(tcx, *ty, cache)) } - ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => { + ty::TyAdt(def, substs) => { let mut res = TypeContents::union(&def.variants, |v| { TypeContents::union(&v.fields, |f| { diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index 4056fb01aa..0c7c387b67 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -13,23 +13,22 @@ use dep_graph::{DepGraph, DepTrackingMap}; use session::Session; use middle; -use middle::cstore::LOCAL_CRATE; use hir::TraitMap; use hir::def::DefMap; -use hir::def_id::{DefId, DefIndex}; +use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE}; use hir::map as ast_map; use hir::map::{DefKey, DefPath, DefPathData, DisambiguatedDefPathData}; use middle::free_region::FreeRegionMap; use middle::region::RegionMaps; use middle::resolve_lifetime; use middle::stability; -use ty::subst::{self, Substs}; +use ty::subst::Substs; use traits; use ty::{self, TraitRef, Ty, TypeAndMut}; -use ty::{TyS, TypeVariants}; -use ty::{AdtDef, ClosureSubsts, ExistentialBounds, Region}; +use ty::{TyS, TypeVariants, Slice}; +use ty::{AdtKind, AdtDef, ClosureSubsts, Region}; use hir::FreevarMap; -use ty::{BareFnTy, InferTy, ParamTy, ProjectionTy, TraitTy}; +use ty::{BareFnTy, InferTy, ParamTy, ProjectionTy, TraitObject}; use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid}; use ty::TypeVariants::*; use ty::layout::{Layout, TargetDataLayout}; @@ -63,6 +62,7 @@ pub struct CtxtArenas<'tcx> { layout: TypedArena, // references + generics: TypedArena>, trait_defs: TypedArena>, adt_defs: TypedArena>, } @@ -78,6 +78,7 @@ impl<'tcx> CtxtArenas<'tcx> { stability: TypedArena::new(), layout: TypedArena::new(), + generics: TypedArena::new(), trait_defs: TypedArena::new(), adt_defs: TypedArena::new() } @@ -91,7 +92,7 @@ pub struct CtxtInterners<'tcx> { /// Specifically use a speedy hash algorithm for these hash sets, /// they're accessed quite often. type_: RefCell>>>, - type_list: RefCell]>>>, + type_list: RefCell>>>>, substs: RefCell>>>, bare_fn: RefCell>>>, region: RefCell>>, @@ -211,7 +212,7 @@ pub struct Tables<'tcx> { pub method_map: ty::MethodMap<'tcx>, /// Borrows - pub upvar_capture_map: ty::UpvarCaptureMap, + pub upvar_capture_map: ty::UpvarCaptureMap<'tcx>, /// Records the type of each closure. 
The def ID is the ID of the /// expression defining the closure. @@ -329,8 +330,8 @@ pub struct GlobalCtxt<'tcx> { /// Maps from a trait item to the trait item "descriptor" pub impl_or_trait_items: RefCell>>, - /// Maps from a trait def-id to a list of the def-ids of its trait items - pub trait_item_def_ids: RefCell>>, + /// Maps from an impl/trait def-id to a list of the def-ids of its items + pub impl_or_trait_item_def_ids: RefCell>>, /// A cache for the trait_items() routine; note that the routine /// itself pushes the `TraitItems` dependency node. @@ -341,7 +342,8 @@ pub struct GlobalCtxt<'tcx> { pub adt_defs: RefCell>>, /// Maps from the def-id of an item (trait/struct/enum/fn) to its - /// associated predicates. + /// associated generics and predicates. + pub generics: RefCell>>, pub predicates: RefCell>>, /// Maps from the def-id of a trait to the list of @@ -390,12 +392,6 @@ pub struct GlobalCtxt<'tcx> { /// Methods in these implementations don't need to be exported. pub inherent_impls: RefCell>>, - /// Maps a DefId of an impl to a list of its items. - /// Note that this contains all of the impls that we know about, - /// including ones in other crates. It's not clear that this is the best - /// way to do it. - pub impl_items: RefCell>>, - /// Set of used unsafe nodes (functions or blocks). Unsafe nodes not /// present in this set can be warned about. pub used_unsafe: RefCell, @@ -492,6 +488,13 @@ pub struct GlobalCtxt<'tcx> { /// Cache for layouts computed from types. pub layout_cache: RefCell, &'tcx Layout>>, + + /// Used to prevent layout from recursing too deeply. + pub layout_depth: Cell, + + /// Map from function to the `#[derive]` mode that it's defining. Only used + /// by `rustc-macro` crates. + pub derive_macros: RefCell>, } impl<'tcx> GlobalCtxt<'tcx> { @@ -505,7 +508,7 @@ impl<'tcx> GlobalCtxt<'tcx> { } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { - pub fn crate_name(self, cnum: ast::CrateNum) -> token::InternedString { + pub fn crate_name(self, cnum: CrateNum) -> token::InternedString { if cnum == LOCAL_CRATE { self.crate_name.clone() } else { @@ -513,7 +516,15 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - pub fn crate_disambiguator(self, cnum: ast::CrateNum) -> token::InternedString { + pub fn original_crate_name(self, cnum: CrateNum) -> token::InternedString { + if cnum == LOCAL_CRATE { + self.crate_name.clone() + } else { + self.sess.cstore.original_crate_name(cnum) + } + } + + pub fn crate_disambiguator(self, cnum: CrateNum) -> token::InternedString { if cnum == LOCAL_CRATE { self.sess.local_crate_disambiguator() } else { @@ -526,7 +537,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// relative to `krate`. /// /// Returns `None` if there is no `DefIndex` with that key. 
- pub fn def_index_for_def_key(self, krate: ast::CrateNum, key: DefKey) + pub fn def_index_for_def_key(self, krate: CrateNum, key: DefKey) -> Option { if krate == LOCAL_CRATE { self.map.def_index_for_def_key(key) @@ -583,13 +594,19 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.tables.borrow_mut().node_types.insert(id, ty); } + pub fn alloc_generics(self, generics: ty::Generics<'gcx>) + -> &'gcx ty::Generics<'gcx> { + self.global_interners.arenas.generics.alloc(generics) + } + pub fn intern_trait_def(self, def: ty::TraitDef<'gcx>) -> &'gcx ty::TraitDef<'gcx> { let did = def.trait_ref.def_id; - let interned = self.global_interners.arenas.trait_defs.alloc(def); + let interned = self.alloc_trait_def(def); if let Some(prev) = self.trait_defs.borrow_mut().insert(did, interned) { bug!("Tried to overwrite interned TraitDef: {:?}", prev) } + self.generics.borrow_mut().insert(did, interned.generics); interned } @@ -607,7 +624,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn intern_adt_def(self, did: DefId, - kind: ty::AdtKind, + kind: AdtKind, variants: Vec>) -> ty::AdtDefMaster<'gcx> { let def = ty::AdtDefData::new(self, did, kind, variants); @@ -711,6 +728,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { impl_trait_refs: RefCell::new(DepTrackingMap::new(dep_graph.clone())), trait_defs: RefCell::new(DepTrackingMap::new(dep_graph.clone())), adt_defs: RefCell::new(DepTrackingMap::new(dep_graph.clone())), + generics: RefCell::new(DepTrackingMap::new(dep_graph.clone())), predicates: RefCell::new(DepTrackingMap::new(dep_graph.clone())), super_predicates: RefCell::new(DepTrackingMap::new(dep_graph.clone())), fulfilled_predicates: RefCell::new(fulfilled_predicates), @@ -721,13 +739,12 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { rcache: RefCell::new(FnvHashMap()), tc_cache: RefCell::new(FnvHashMap()), impl_or_trait_items: RefCell::new(DepTrackingMap::new(dep_graph.clone())), - trait_item_def_ids: RefCell::new(DepTrackingMap::new(dep_graph.clone())), + impl_or_trait_item_def_ids: RefCell::new(DepTrackingMap::new(dep_graph.clone())), trait_items_cache: RefCell::new(DepTrackingMap::new(dep_graph.clone())), ty_param_defs: RefCell::new(NodeMap()), normalized_cache: RefCell::new(FnvHashMap()), lang_items: lang_items, inherent_impls: RefCell::new(DepTrackingMap::new(dep_graph.clone())), - impl_items: RefCell::new(DepTrackingMap::new(dep_graph.clone())), used_unsafe: RefCell::new(NodeSet()), used_mut_nodes: RefCell::new(NodeSet()), used_trait_imports: RefCell::new(NodeSet()), @@ -746,6 +763,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { crate_name: token::intern_and_get_ident(crate_name), data_layout: data_layout, layout_cache: RefCell::new(FnvHashMap()), + layout_depth: Cell::new(0), + derive_macros: RefCell::new(NodeMap()), }, f) } } @@ -833,10 +852,11 @@ impl<'a, 'tcx> Lift<'tcx> for &'a Region { } } -impl<'a, 'tcx> Lift<'tcx> for &'a [Ty<'a>] { - type Lifted = &'tcx [Ty<'tcx>]; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<&'tcx [Ty<'tcx>]> { - if let Some(&Interned(list)) = tcx.interners.type_list.borrow().get(*self) { +impl<'a, 'tcx> Lift<'tcx> for &'a Slice> { + type Lifted = &'tcx Slice>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) + -> Option<&'tcx Slice>> { + if let Some(&Interned(list)) = tcx.interners.type_list.borrow().get(&self[..]) { if *self as *const _ == list as *const _ { return Some(list); } @@ -1017,8 +1037,8 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { pub fn print_debug_stats(self) { sty_debug_print!( self, - TyEnum, 
TyBox, TyArray, TySlice, TyRawPtr, TyRef, TyFnDef, TyFnPtr, - TyTrait, TyStruct, TyClosure, TyTuple, TyParam, TyInfer, TyProjection, TyAnon); + TyAdt, TyBox, TyArray, TySlice, TyRawPtr, TyRef, TyFnDef, TyFnPtr, + TyTrait, TyClosure, TyTuple, TyParam, TyInfer, TyProjection, TyAnon); println!("Substs interner: #{}", self.interners.substs.borrow().len()); println!("BareFnTy interner: #{}", self.interners.bare_fn.borrow().len()); @@ -1053,9 +1073,24 @@ impl<'tcx: 'lcx, 'lcx> Borrow> for Interned<'tcx, TyS<'tcx>> } } -impl<'tcx: 'lcx, 'lcx> Borrow<[Ty<'lcx>]> for Interned<'tcx, [Ty<'tcx>]> { +// NB: An Interned> compares and hashes as its elements. +impl<'tcx, T: PartialEq> PartialEq for Interned<'tcx, Slice> { + fn eq(&self, other: &Interned<'tcx, Slice>) -> bool { + self.0[..] == other.0[..] + } +} + +impl<'tcx, T: Eq> Eq for Interned<'tcx, Slice> {} + +impl<'tcx, T: Hash> Hash for Interned<'tcx, Slice> { + fn hash(&self, s: &mut H) { + self.0[..].hash(s) + } +} + +impl<'tcx: 'lcx, 'lcx> Borrow<[Ty<'lcx>]> for Interned<'tcx, Slice>> { fn borrow<'a>(&'a self) -> &'a [Ty<'lcx>] { - self.0 + &self.0[..] } } @@ -1077,32 +1112,23 @@ impl<'tcx> Borrow for Interned<'tcx, Region> { } } -macro_rules! items { ($($item:item)+) => ($($item)+) } -macro_rules! impl_interners { - ($lt_tcx:tt, $($name:ident: $method:ident($alloc:ty, $needs_infer:expr)-> $ty:ty),+) => { - items!($(impl<$lt_tcx> PartialEq for Interned<$lt_tcx, $ty> { - fn eq(&self, other: &Self) -> bool { - self.0 == other.0 - } - } - - impl<$lt_tcx> Eq for Interned<$lt_tcx, $ty> {} - - impl<$lt_tcx> Hash for Interned<$lt_tcx, $ty> { - fn hash(&self, s: &mut H) { - self.0.hash(s) - } - } - +macro_rules! intern_method { + ($lt_tcx:tt, $name:ident: $method:ident($alloc:ty, + $alloc_to_key:expr, + $alloc_to_ret:expr, + $needs_infer:expr) -> $ty:ty) => { impl<'a, 'gcx, $lt_tcx> TyCtxt<'a, 'gcx, $lt_tcx> { pub fn $method(self, v: $alloc) -> &$lt_tcx $ty { - if let Some(i) = self.interners.$name.borrow().get::<$ty>(&v) { - return i.0; - } - if !self.is_global() { - if let Some(i) = self.global_interners.$name.borrow().get::<$ty>(&v) { + { + let key = ($alloc_to_key)(&v); + if let Some(i) = self.interners.$name.borrow().get(key) { return i.0; } + if !self.is_global() { + if let Some(i) = self.global_interners.$name.borrow().get(key) { + return i.0; + } + } } // HACK(eddyb) Depend on flags being accurate to @@ -1113,7 +1139,7 @@ macro_rules! impl_interners { let v = unsafe { mem::transmute(v) }; - let i = self.global_interners.arenas.$name.alloc(v); + let i = ($alloc_to_ret)(self.global_interners.arenas.$name.alloc(v)); self.global_interners.$name.borrow_mut().insert(Interned(i)); return i; } @@ -1127,11 +1153,31 @@ macro_rules! impl_interners { } } - let i = self.interners.arenas.$name.alloc(v); + let i = ($alloc_to_ret)(self.interners.arenas.$name.alloc(v)); self.interners.$name.borrow_mut().insert(Interned(i)); i } - })+); + } + } +} + +macro_rules! 
direct_interners { + ($lt_tcx:tt, $($name:ident: $method:ident($needs_infer:expr) -> $ty:ty),+) => { + $(impl<$lt_tcx> PartialEq for Interned<$lt_tcx, $ty> { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + + impl<$lt_tcx> Eq for Interned<$lt_tcx, $ty> {} + + impl<$lt_tcx> Hash for Interned<$lt_tcx, $ty> { + fn hash(&self, s: &mut H) { + self.0.hash(s) + } + } + + intern_method!($lt_tcx, $name: $method($ty, |x| x, |x| x, $needs_infer) -> $ty);)+ } } @@ -1139,22 +1185,26 @@ fn keep_local<'tcx, T: ty::TypeFoldable<'tcx>>(x: &T) -> bool { x.has_type_flags(ty::TypeFlags::KEEP_IN_LOCAL_TCX) } -impl_interners!('tcx, - type_list: mk_type_list(Vec>, keep_local) -> [Ty<'tcx>], - substs: mk_substs(Substs<'tcx>, |substs: &Substs| { - keep_local(&substs.types) || keep_local(&substs.regions) +direct_interners!('tcx, + substs: mk_substs(|substs: &Substs| { + substs.params().iter().any(keep_local) }) -> Substs<'tcx>, - bare_fn: mk_bare_fn(BareFnTy<'tcx>, |fty: &BareFnTy| { + bare_fn: mk_bare_fn(|fty: &BareFnTy| { keep_local(&fty.sig) }) -> BareFnTy<'tcx>, - region: mk_region(Region, keep_local) -> Region + region: mk_region(|r| { + match r { + &ty::ReVar(_) | &ty::ReSkolemized(..) => true, + _ => false + } + }) -> Region ); -fn bound_list_is_sorted(bounds: &[ty::PolyProjectionPredicate]) -> bool { - bounds.is_empty() || - bounds[1..].iter().enumerate().all( - |(index, bound)| bounds[index].sort_key() <= bound.sort_key()) -} +intern_method!('tcx, + type_list: mk_type_list(Vec>, Deref::deref, |xs: &[Ty]| -> &Slice { + unsafe { mem::transmute(xs) } + }, keep_local) -> Slice> +); impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// Create an unsafe fn ty based on a safe fn ty. @@ -1213,9 +1263,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.mk_imm_ref(self.mk_region(ty::ReStatic), self.mk_str()) } - pub fn mk_enum(self, def: AdtDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { + pub fn mk_adt(self, def: AdtDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { // take a copy of substs so that we own the vectors inside - self.mk_ty(TyEnum(def, substs)) + self.mk_ty(TyAdt(def, substs)) } pub fn mk_box(self, ty: Ty<'tcx>) -> Ty<'tcx> { @@ -1288,18 +1338,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.mk_ty(TyFnPtr(fty)) } - pub fn mk_trait(self, - principal: ty::PolyTraitRef<'tcx>, - bounds: ExistentialBounds<'tcx>) - -> Ty<'tcx> - { - assert!(bound_list_is_sorted(&bounds.projection_bounds)); - - let inner = box TraitTy { - principal: principal, - bounds: bounds - }; - self.mk_ty(TyTrait(inner)) + pub fn mk_trait(self, mut obj: TraitObject<'tcx>) -> Ty<'tcx> { + obj.projection_bounds.sort_by_key(|b| b.sort_key(self)); + self.mk_ty(TyTrait(box obj)) } pub fn mk_projection(self, @@ -1311,11 +1352,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.mk_ty(TyProjection(inner)) } - pub fn mk_struct(self, def: AdtDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { - // take a copy of substs so that we own the vectors inside - self.mk_ty(TyStruct(def, substs)) - } - pub fn mk_closure(self, closure_id: DefId, substs: &'tcx Substs<'tcx>, @@ -1351,18 +1387,17 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } pub fn mk_param(self, - space: subst::ParamSpace, index: u32, name: Name) -> Ty<'tcx> { - self.mk_ty(TyParam(ParamTy { space: space, idx: index, name: name })) + self.mk_ty(TyParam(ParamTy { idx: index, name: name })) } pub fn mk_self_type(self) -> Ty<'tcx> { - self.mk_param(subst::SelfSpace, 0, keywords::SelfType.name()) + self.mk_param(0, keywords::SelfType.name()) } pub fn 
mk_param_from_def(self, def: &ty::TypeParameterDef) -> Ty<'tcx> { - self.mk_param(def.space, def.index, def.name) + self.mk_param(def.index, def.name) } pub fn mk_anon(self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { @@ -1371,9 +1406,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn trait_items(self, trait_did: DefId) -> Rc>> { self.trait_items_cache.memoize(trait_did, || { - let def_ids = self.trait_item_def_ids(trait_did); + let def_ids = self.impl_or_trait_items(trait_did); Rc::new(def_ids.iter() - .map(|d| self.impl_or_trait_item(d.def_id())) + .map(|&def_id| self.impl_or_trait_item(def_id)) .collect()) }) } @@ -1381,13 +1416,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// Obtain the representation annotation for a struct definition. pub fn lookup_repr_hints(self, did: DefId) -> Rc> { self.repr_hint_cache.memoize(did, || { - Rc::new(if did.is_local() { - self.get_attrs(did).iter().flat_map(|meta| { - attr::find_repr_attrs(self.sess.diagnostic(), meta).into_iter() - }).collect() - } else { - self.sess.cstore.repr_attrs(did) - }) + Rc::new(self.get_attrs(did).iter().flat_map(|meta| { + attr::find_repr_attrs(self.sess.diagnostic(), meta).into_iter() + }).collect()) }) } } diff --git a/src/librustc/ty/error.rs b/src/librustc/ty/error.rs index 42d5788568..001f47af68 100644 --- a/src/librustc/ty/error.rs +++ b/src/librustc/ty/error.rs @@ -9,7 +9,6 @@ // except according to those terms. use hir::def_id::DefId; -use ty::subst; use infer::type_variable; use ty::{self, BoundRegion, Region, Ty, TyCtxt}; @@ -42,11 +41,11 @@ pub enum TypeError<'tcx> { FixedArraySize(ExpectedFound), TyParamSize(ExpectedFound), ArgCount, - RegionsDoesNotOutlive(Region, Region), - RegionsNotSame(Region, Region), - RegionsNoOverlap(Region, Region), - RegionsInsufficientlyPolymorphic(BoundRegion, Region), - RegionsOverlyPolymorphic(BoundRegion, Region), + RegionsDoesNotOutlive(&'tcx Region, &'tcx Region), + RegionsNotSame(&'tcx Region, &'tcx Region), + RegionsNoOverlap(&'tcx Region, &'tcx Region), + RegionsInsufficientlyPolymorphic(BoundRegion, &'tcx Region), + RegionsOverlyPolymorphic(BoundRegion, &'tcx Region), Sorts(ExpectedFound>), IntegerAsChar, IntMismatch(ExpectedFound), @@ -99,9 +98,9 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { values.expected, values.found) } - Mutability => write!(f, "values differ in mutability"), + Mutability => write!(f, "types differ in mutability"), BoxMutability => { - write!(f, "boxed values differ in mutability") + write!(f, "boxed types differ in mutability") } VecMutability => write!(f, "vectors differ in mutability"), PtrMutability => write!(f, "pointers differ in mutability"), @@ -211,13 +210,13 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { } impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> { - fn sort_string(&self, tcx: TyCtxt<'a, 'gcx, 'lcx>) -> String { + pub fn sort_string(&self, tcx: TyCtxt<'a, 'gcx, 'lcx>) -> String { match self.sty { ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr | ty::TyNever => self.to_string(), ty::TyTuple(ref tys) if tys.is_empty() => self.to_string(), - ty::TyEnum(def, _) => format!("enum `{}`", tcx.item_path_str(def.did)), + ty::TyAdt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)), ty::TyBox(_) => "box".to_string(), ty::TyArray(_, n) => format!("array of {} elements", n), ty::TySlice(_) => "slice".to_string(), @@ -243,10 +242,7 @@ impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> { ty::TyFnDef(..) 
=> format!("fn item"), ty::TyFnPtr(_) => "fn pointer".to_string(), ty::TyTrait(ref inner) => { - format!("trait {}", tcx.item_path_str(inner.principal_def_id())) - } - ty::TyStruct(def, _) => { - format!("struct `{}`", tcx.item_path_str(def.did)) + format!("trait {}", tcx.item_path_str(inner.principal.def_id())) } ty::TyClosure(..) => "closure".to_string(), ty::TyTuple(_) => "tuple".to_string(), @@ -258,7 +254,7 @@ impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> { ty::TyInfer(ty::FreshFloatTy(_)) => "skolemized floating-point type".to_string(), ty::TyProjection(_) => "associated type".to_string(), ty::TyParam(ref p) => { - if p.space == subst::SelfSpace { + if p.is_self() { "Self".to_string() } else { "type parameter".to_string() @@ -297,7 +293,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.note_and_explain_region(db, "concrete lifetime that was found is ", conc_region, ""); } - RegionsOverlyPolymorphic(_, ty::ReVar(_)) => { + RegionsOverlyPolymorphic(_, &ty::ReVar(_)) => { // don't bother to print out the message below for // inference variables, it's not very illuminating. } diff --git a/src/librustc/ty/fast_reject.rs b/src/librustc/ty/fast_reject.rs index 9bf2daeb5f..ee1544d2d9 100644 --- a/src/librustc/ty/fast_reject.rs +++ b/src/librustc/ty/fast_reject.rs @@ -22,14 +22,13 @@ pub enum SimplifiedType { IntSimplifiedType(ast::IntTy), UintSimplifiedType(ast::UintTy), FloatSimplifiedType(ast::FloatTy), - EnumSimplifiedType(DefId), + AdtSimplifiedType(DefId), StrSimplifiedType, VecSimplifiedType, PtrSimplifiedType, NeverSimplifiedType, TupleSimplifiedType(usize), TraitSimplifiedType(DefId), - StructSimplifiedType(DefId), ClosureSimplifiedType(DefId), AnonSimplifiedType(DefId), FunctionSimplifiedType(usize), @@ -56,15 +55,12 @@ pub fn simplify_type<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty::TyInt(int_type) => Some(IntSimplifiedType(int_type)), ty::TyUint(uint_type) => Some(UintSimplifiedType(uint_type)), ty::TyFloat(float_type) => Some(FloatSimplifiedType(float_type)), - ty::TyEnum(def, _) => Some(EnumSimplifiedType(def.did)), + ty::TyAdt(def, _) => Some(AdtSimplifiedType(def.did)), ty::TyStr => Some(StrSimplifiedType), ty::TyArray(..) | ty::TySlice(_) => Some(VecSimplifiedType), ty::TyRawPtr(_) => Some(PtrSimplifiedType), ty::TyTrait(ref trait_info) => { - Some(TraitSimplifiedType(trait_info.principal_def_id())) - } - ty::TyStruct(def, _) => { - Some(StructSimplifiedType(def.did)) + Some(TraitSimplifiedType(trait_info.principal.def_id())) } ty::TyRef(_, mt) => { // since we introduce auto-refs during method lookup, we @@ -75,7 +71,7 @@ pub fn simplify_type<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty::TyBox(_) => { // treat like we would treat `Box` match tcx.lang_items.require_owned_box() { - Ok(def_id) => Some(StructSimplifiedType(def_id)), + Ok(def_id) => Some(AdtSimplifiedType(def_id)), Err(msg) => tcx.sess.fatal(&msg), } } @@ -86,7 +82,7 @@ pub fn simplify_type<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty::TyTuple(ref tys) => { Some(TupleSimplifiedType(tys.len())) } - ty::TyFnDef(_, _, ref f) | ty::TyFnPtr(ref f) => { + ty::TyFnDef(.., ref f) | ty::TyFnPtr(ref f) => { Some(FunctionSimplifiedType(f.sig.0.inputs.len())) } ty::TyProjection(_) | ty::TyParam(_) => { diff --git a/src/librustc/ty/flags.rs b/src/librustc/ty/flags.rs index a428c99119..1434b0e60e 100644 --- a/src/librustc/ty/flags.rs +++ b/src/librustc/ty/flags.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use ty::subst; +use ty::subst::Substs; use ty::{self, Ty, TypeFlags, TypeFoldable}; pub struct FlagComputation { @@ -77,7 +77,7 @@ impl FlagComputation { &ty::TyParam(ref p) => { self.add_flags(TypeFlags::HAS_LOCAL_NAMES); - if p.space == subst::SelfSpace { + if p.is_self() { self.add_flags(TypeFlags::HAS_SELF); } else { self.add_flags(TypeFlags::HAS_PARAMS); @@ -102,7 +102,7 @@ impl FlagComputation { } } - &ty::TyEnum(_, substs) | &ty::TyStruct(_, substs) => { + &ty::TyAdt(_, substs) => { self.add_substs(substs); } @@ -121,17 +121,16 @@ impl FlagComputation { self.add_substs(substs); } - &ty::TyTrait(box ty::TraitTy { ref principal, ref bounds }) => { + &ty::TyTrait(ref obj) => { let mut computation = FlagComputation::new(); - computation.add_substs(principal.0.substs); - for projection_bound in &bounds.projection_bounds { + computation.add_substs(obj.principal.skip_binder().substs); + for projection_bound in &obj.projection_bounds { let mut proj_computation = FlagComputation::new(); - proj_computation.add_projection_predicate(&projection_bound.0); + proj_computation.add_existential_projection(&projection_bound.0); self.add_bound_computation(&proj_computation); } self.add_bound_computation(&computation); - - self.add_bounds(bounds); + self.add_region(obj.region_bound); } &ty::TyBox(tt) | &ty::TyArray(tt, _) | &ty::TySlice(tt) => { @@ -143,7 +142,7 @@ impl FlagComputation { } &ty::TyRef(r, ref m) => { - self.add_region(*r); + self.add_region(r); self.add_ty(m.ty); } @@ -182,8 +181,8 @@ impl FlagComputation { self.add_bound_computation(&computation); } - fn add_region(&mut self, r: ty::Region) { - match r { + fn add_region(&mut self, r: &ty::Region) { + match *r { ty::ReVar(..) => { self.add_flags(TypeFlags::HAS_RE_INFER); self.add_flags(TypeFlags::KEEP_IN_LOCAL_TCX); @@ -204,23 +203,22 @@ impl FlagComputation { } } - fn add_projection_predicate(&mut self, projection_predicate: &ty::ProjectionPredicate) { - self.add_projection_ty(&projection_predicate.projection_ty); - self.add_ty(projection_predicate.ty); + fn add_existential_projection(&mut self, projection: &ty::ExistentialProjection) { + self.add_substs(projection.trait_ref.substs); + self.add_ty(projection.ty); } fn add_projection_ty(&mut self, projection_ty: &ty::ProjectionTy) { self.add_substs(projection_ty.trait_ref.substs); } - fn add_substs(&mut self, substs: &subst::Substs) { - self.add_tys(substs.types.as_slice()); - for &r in &substs.regions { - self.add_region(r); + fn add_substs(&mut self, substs: &Substs) { + for ty in substs.types() { + self.add_ty(ty); } - } - fn add_bounds(&mut self, bounds: &ty::ExistentialBounds) { - self.add_region(bounds.region_bound); + for r in substs.regions() { + self.add_region(r); + } } } diff --git a/src/librustc/ty/fold.rs b/src/librustc/ty/fold.rs index a8826f0b23..886ad8cd86 100644 --- a/src/librustc/ty/fold.rs +++ b/src/librustc/ty/fold.rs @@ -40,7 +40,7 @@ //! and does not need to visit anything else. 
use middle::region; -use ty::subst; +use ty::subst::Substs; use ty::adjustment; use ty::{self, Binder, Ty, TyCtxt, TypeFlags}; @@ -140,17 +140,13 @@ pub trait TypeFolder<'gcx: 'tcx, 'tcx> : Sized { t.super_fold_with(self) } - fn fold_trait_ref(&mut self, t: &ty::TraitRef<'tcx>) -> ty::TraitRef<'tcx> { - t.super_fold_with(self) - } - fn fold_impl_header(&mut self, imp: &ty::ImplHeader<'tcx>) -> ty::ImplHeader<'tcx> { imp.super_fold_with(self) } fn fold_substs(&mut self, - substs: &'tcx subst::Substs<'tcx>) - -> &'tcx subst::Substs<'tcx> { + substs: &'tcx Substs<'tcx>) + -> &'tcx Substs<'tcx> { substs.super_fold_with(self) } @@ -173,15 +169,10 @@ pub trait TypeFolder<'gcx: 'tcx, 'tcx> : Sized { fty.super_fold_with(self) } - fn fold_region(&mut self, r: ty::Region) -> ty::Region { + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { r.super_fold_with(self) } - fn fold_existential_bounds(&mut self, s: &ty::ExistentialBounds<'tcx>) - -> ty::ExistentialBounds<'tcx> { - s.super_fold_with(self) - } - fn fold_autoref(&mut self, ar: &adjustment::AutoRef<'tcx>) -> adjustment::AutoRef<'tcx> { ar.super_fold_with(self) @@ -197,7 +188,7 @@ pub trait TypeVisitor<'tcx> : Sized { t.super_visit_with(self) } - fn visit_region(&mut self, r: ty::Region) -> bool { + fn visit_region(&mut self, r: &'tcx ty::Region) -> bool { r.super_visit_with(self) } } @@ -231,13 +222,15 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// whether any late-bound regions were skipped pub fn collect_regions(self, value: &T, - region_set: &mut FnvHashSet) + region_set: &mut FnvHashSet<&'tcx ty::Region>) -> bool where T : TypeFoldable<'tcx> { let mut have_bound_regions = false; - self.fold_regions(value, &mut have_bound_regions, - |r, d| { region_set.insert(r.from_depth(d)); r }); + self.fold_regions(value, &mut have_bound_regions, |r, d| { + region_set.insert(self.mk_region(r.from_depth(d))); + r + }); have_bound_regions } @@ -249,7 +242,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { skipped_regions: &mut bool, mut f: F) -> T - where F : FnMut(ty::Region, u32) -> ty::Region, + where F : FnMut(&'tcx ty::Region, u32) -> &'tcx ty::Region, T : TypeFoldable<'tcx>, { value.fold_with(&mut RegionFolder::new(self, skipped_regions, &mut f)) @@ -269,14 +262,14 @@ pub struct RegionFolder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { tcx: TyCtxt<'a, 'gcx, 'tcx>, skipped_regions: &'a mut bool, current_depth: u32, - fld_r: &'a mut (FnMut(ty::Region, u32) -> ty::Region + 'a), + fld_r: &'a mut (FnMut(&'tcx ty::Region, u32) -> &'tcx ty::Region + 'a), } impl<'a, 'gcx, 'tcx> RegionFolder<'a, 'gcx, 'tcx> { pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, skipped_regions: &'a mut bool, fld_r: &'a mut F) -> RegionFolder<'a, 'gcx, 'tcx> - where F : FnMut(ty::Region, u32) -> ty::Region + where F : FnMut(&'tcx ty::Region, u32) -> &'tcx ty::Region { RegionFolder { tcx: tcx, @@ -297,8 +290,8 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionFolder<'a, 'gcx, 'tcx> { t } - fn fold_region(&mut self, r: ty::Region) -> ty::Region { - match r { + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { + match *r { ty::ReLateBound(debruijn, _) if debruijn.depth < self.current_depth => { debug!("RegionFolder.fold_region({:?}) skipped bound region (current depth={})", r, self.current_depth); @@ -322,16 +315,16 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionFolder<'a, 'gcx, 'tcx> { struct RegionReplacer<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { tcx: TyCtxt<'a, 'gcx, 'tcx>, current_depth: u32, - fld_r: &'a mut (FnMut(ty::BoundRegion) -> ty::Region + 'a), 
- map: FnvHashMap + fld_r: &'a mut (FnMut(ty::BoundRegion) -> &'tcx ty::Region + 'a), + map: FnvHashMap } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn replace_late_bound_regions(self, value: &Binder, mut f: F) - -> (T, FnvHashMap) - where F : FnMut(ty::BoundRegion) -> ty::Region, + -> (T, FnvHashMap) + where F : FnMut(ty::BoundRegion) -> &'tcx ty::Region, T : TypeFoldable<'tcx>, { let mut replacer = RegionReplacer::new(self, &mut f); @@ -349,7 +342,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { where T : TypeFoldable<'tcx> { self.replace_late_bound_regions(value, |br| { - ty::ReFree(ty::FreeRegion{scope: all_outlive_scope, bound_region: br}) + self.mk_region(ty::ReFree(ty::FreeRegion { + scope: all_outlive_scope, + bound_region: br + })) }).0 } @@ -362,11 +358,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { let bound0_value = bound2_value.skip_binder().skip_binder(); let value = self.fold_regions(bound0_value, &mut false, |region, current_depth| { - match region { + match *region { ty::ReLateBound(debruijn, br) if debruijn.depth >= current_depth => { // should be true if no escaping regions from bound2_value assert!(debruijn.depth - current_depth <= 1); - ty::ReLateBound(ty::DebruijnIndex::new(current_depth), br) + self.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(current_depth), br)) } _ => { region @@ -420,7 +416,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn erase_late_bound_regions(self, value: &Binder) -> T where T : TypeFoldable<'tcx> { - self.replace_late_bound_regions(value, |_| ty::ReErased).0 + self.replace_late_bound_regions(value, |_| self.mk_region(ty::ReErased)).0 } /// Rewrite any late-bound regions so that they are anonymous. Region numbers are @@ -437,7 +433,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { let mut counter = 0; Binder(self.replace_late_bound_regions(sig, |_| { counter += 1; - ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrAnon(counter)) + self.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrAnon(counter))) }).0) } } @@ -445,7 +441,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { impl<'a, 'gcx, 'tcx> RegionReplacer<'a, 'gcx, 'tcx> { fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, fld_r: &'a mut F) -> RegionReplacer<'a, 'gcx, 'tcx> - where F : FnMut(ty::BoundRegion) -> ty::Region + where F : FnMut(ty::BoundRegion) -> &'tcx ty::Region { RegionReplacer { tcx: tcx, @@ -474,22 +470,22 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionReplacer<'a, 'gcx, 'tcx> { t.super_fold_with(self) } - fn fold_region(&mut self, r: ty::Region) -> ty::Region { - match r { + fn fold_region(&mut self, r:&'tcx ty::Region) -> &'tcx ty::Region { + match *r { ty::ReLateBound(debruijn, br) if debruijn.depth == self.current_depth => { let fld_r = &mut self.fld_r; let region = *self.map.entry(br).or_insert_with(|| fld_r(br)); - if let ty::ReLateBound(debruijn1, br) = region { + if let ty::ReLateBound(debruijn1, br) = *region { // If the callback returns a late-bound region, // that region should always use depth 1. Then we // adjust it to the correct depth. 
assert_eq!(debruijn1.depth, 1); - ty::ReLateBound(debruijn, br) + self.tcx.mk_region(ty::ReLateBound(debruijn, br)) } else { region } } - r => r + _ => r } } } @@ -537,7 +533,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { u.super_fold_with(self) } - fn fold_region(&mut self, r: ty::Region) -> ty::Region { + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { // because late-bound regions affect subtyping, we can't // erase the bound/free distinction, but we can replace // all free regions with 'erased. @@ -546,9 +542,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // type system never "sees" those, they get substituted // away. In trans, they will always be erased to 'erased // whenever a substitution occurs. - match r { + match *r { ty::ReLateBound(..) => r, - _ => ty::ReErased + _ => self.tcx().mk_region(ty::ReErased) } } } @@ -583,7 +579,7 @@ pub fn shift_regions<'a, 'gcx, 'tcx, T>(tcx: TyCtxt<'a, 'gcx, 'tcx>, value, amount); value.fold_with(&mut RegionFolder::new(tcx, &mut false, &mut |region, _current_depth| { - shift_region(region, amount) + tcx.mk_region(shift_region(*region, amount)) })) } @@ -625,7 +621,7 @@ impl<'tcx> TypeVisitor<'tcx> for HasEscapingRegionsVisitor { t.region_depth > self.depth } - fn visit_region(&mut self, r: ty::Region) -> bool { + fn visit_region(&mut self, r: &'tcx ty::Region) -> bool { r.escapes_depth(self.depth) } } @@ -639,17 +635,18 @@ impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor { t.flags.get().intersects(self.flags) } - fn visit_region(&mut self, r: ty::Region) -> bool { + fn visit_region(&mut self, r: &'tcx ty::Region) -> bool { if self.flags.intersects(ty::TypeFlags::HAS_LOCAL_NAMES) { // does this represent a region that cannot be named // in a global way? used in fulfillment caching. - match r { + match *r { ty::ReStatic | ty::ReEmpty | ty::ReErased => {} _ => return true, } } - if self.flags.intersects(ty::TypeFlags::HAS_RE_INFER) { - match r { + if self.flags.intersects(ty::TypeFlags::HAS_RE_INFER | + ty::TypeFlags::KEEP_IN_LOCAL_TCX) { + match *r { ty::ReVar(_) | ty::ReSkolemized(..) => { return true } _ => {} } @@ -697,8 +694,8 @@ impl<'tcx> TypeVisitor<'tcx> for LateBoundRegionsCollector { t.super_visit_with(self) } - fn visit_region(&mut self, r: ty::Region) -> bool { - match r { + fn visit_region(&mut self, r: &'tcx ty::Region) -> bool { + match *r { ty::ReLateBound(debruijn, br) if debruijn.depth == self.current_depth => { self.regions.insert(br); } diff --git a/src/librustc/ty/item_path.rs b/src/librustc/ty/item_path.rs index 5e4e7b342d..fdf5185eb6 100644 --- a/src/librustc/ty/item_path.rs +++ b/src/librustc/ty/item_path.rs @@ -9,8 +9,7 @@ // except according to those terms. use hir::map::DefPathData; -use middle::cstore::LOCAL_CRATE; -use hir::def_id::{DefId, CRATE_DEF_INDEX}; +use hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE}; use ty::{self, Ty, TyCtxt}; use syntax::ast; use syntax::parse::token; @@ -67,7 +66,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// Returns the "path" to a particular crate. This can proceed in /// various ways, depending on the `root_mode` of the `buffer`. /// (See `RootMode` enum for more details.) - pub fn push_krate_path(self, buffer: &mut T, cnum: ast::CrateNum) + pub fn push_krate_path(self, buffer: &mut T, cnum: CrateNum) where T: ItemPathBuffer { match *buffer.root_mode() { @@ -102,11 +101,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { RootMode::Absolute => { // In absolute mode, just write the crate name // unconditionally. 
- if cnum == LOCAL_CRATE { - buffer.push(&self.crate_name(cnum)); - } else { - buffer.push(&self.sess.cstore.original_crate_name(cnum)); - } + buffer.push(&self.original_crate_name(cnum)); } } } @@ -139,7 +134,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - cur_path.push(self.sess.cstore.opt_item_name(cur_def).unwrap_or_else(|| + cur_path.push(self.sess.cstore.def_key(cur_def) + .disambiguated_data.data.get_opt_name().unwrap_or_else(|| token::intern(""))); match visible_parent_map.get(&cur_def) { Some(&def) => cur_def = def, @@ -262,9 +258,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // impl on `Foo`, but fallback to `::bar` if self-type is // anything other than a simple path. match self_ty.sty { - ty::TyStruct(adt_def, substs) | - ty::TyEnum(adt_def, substs) => { - if substs.types.is_empty() { // ignore regions + ty::TyAdt(adt_def, substs) => { + if substs.types().next().is_none() { // ignore regions self.push_item_path(buffer, adt_def.did); } else { buffer.push(&format!("<{}>", self_ty)); @@ -305,7 +300,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// Returns the def-id of `def_id`'s parent in the def tree. If /// this returns `None`, then `def_id` represents a crate root or /// inlined root. - pub fn parent_def_id(&self, def_id: DefId) -> Option { + pub fn parent_def_id(self, def_id: DefId) -> Option { let key = self.def_key(def_id); key.parent.map(|index| DefId { krate: def_id.krate, index: index }) } @@ -319,10 +314,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// decisions and we may want to adjust it later. pub fn characteristic_def_id_of_type(ty: Ty) -> Option { match ty.sty { - ty::TyStruct(adt_def, _) | - ty::TyEnum(adt_def, _) => Some(adt_def.did), + ty::TyAdt(adt_def, _) => Some(adt_def.did), - ty::TyTrait(ref data) => Some(data.principal_def_id()), + ty::TyTrait(ref data) => Some(data.principal.def_id()), ty::TyArray(subty, _) | ty::TySlice(subty) | @@ -335,7 +329,7 @@ pub fn characteristic_def_id_of_type(ty: Ty) -> Option { .filter_map(|ty| characteristic_def_id_of_type(ty)) .next(), - ty::TyFnDef(def_id, _, _) | + ty::TyFnDef(def_id, ..) | ty::TyClosure(def_id, _) => Some(def_id), ty::TyBool | diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 7c944020e9..ed945534e1 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -45,6 +45,7 @@ pub struct TargetDataLayout { } impl Default for TargetDataLayout { + /// Creates an instance of `TargetDataLayout`. fn default() -> TargetDataLayout { TargetDataLayout { endian: Endian::Big, @@ -327,6 +328,42 @@ pub enum Integer { } impl Integer { + pub fn size(&self) -> Size { + match *self { + I1 => Size::from_bits(1), + I8 => Size::from_bytes(1), + I16 => Size::from_bytes(2), + I32 => Size::from_bytes(4), + I64 => Size::from_bytes(8), + } + } + + pub fn align(&self, dl: &TargetDataLayout)-> Align { + match *self { + I1 => dl.i1_align, + I8 => dl.i8_align, + I16 => dl.i16_align, + I32 => dl.i32_align, + I64 => dl.i64_align, + } + } + + pub fn to_ty<'a, 'tcx>(&self, tcx: &ty::TyCtxt<'a, 'tcx, 'tcx>, + signed: bool) -> Ty<'tcx> { + match (*self, signed) { + (I1, false) => tcx.types.u8, + (I8, false) => tcx.types.u8, + (I16, false) => tcx.types.u16, + (I32, false) => tcx.types.u32, + (I64, false) => tcx.types.u64, + (I1, true) => tcx.types.i8, + (I8, true) => tcx.types.i8, + (I16, true) => tcx.types.i16, + (I32, true) => tcx.types.i32, + (I64, true) => tcx.types.i64, + } + } + /// Find the smallest Integer type which can represent the signed value. 
pub fn fit_signed(x: i64) -> Integer { match x { @@ -349,6 +386,18 @@ impl Integer { } } + /// Find the smallest integer with the given alignment. + pub fn for_abi_align(dl: &TargetDataLayout, align: Align) -> Option { + let wanted = align.abi(); + for &candidate in &[I8, I16, I32, I64] { + let ty = Int(candidate); + if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() { + return Some(candidate); + } + } + None + } + /// Get the Integer type from an attr::IntType. pub fn from_attr(dl: &TargetDataLayout, ity: attr::IntType) -> Integer { match ity { @@ -366,7 +415,7 @@ impl Integer { /// signed discriminant range and #[repr] attribute. /// N.B.: u64 values above i64::MAX will be treated as signed, but /// that shouldn't affect anything, other than maybe debuginfo. - pub fn repr_discr(tcx: TyCtxt, hint: attr::ReprAttr, min: i64, max: i64) + pub fn repr_discr(tcx: TyCtxt, ty: Ty, hint: attr::ReprAttr, min: i64, max: i64) -> (Integer, bool) { // Theoretically, negative values could be larger in unsigned representation // than the unsigned representation of the signed minimum. However, if there @@ -376,11 +425,12 @@ impl Integer { let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max)); let at_least = match hint { - attr::ReprInt(span, ity) => { + attr::ReprInt(ity) => { let discr = Integer::from_attr(&tcx.data_layout, ity); let fit = if ity.is_signed() { signed_fit } else { unsigned_fit }; if discr < fit { - span_bug!(span, "representation hint insufficient for discriminant range") + bug!("Integer::repr_discr: `#[repr]` hint too small for \ + discriminant range of enum `{}", ty) } return (discr, ity.is_signed()); } @@ -396,10 +446,10 @@ impl Integer { } attr::ReprAny => I8, attr::ReprPacked => { - bug!("Integer::repr_discr: found #[repr(packed)] on an enum"); + bug!("Integer::repr_discr: found #[repr(packed)] on enum `{}", ty); } attr::ReprSimd => { - bug!("Integer::repr_discr: found #[repr(simd)] on an enum"); + bug!("Integer::repr_discr: found #[repr(simd)] on enum `{}", ty); } }; @@ -488,7 +538,7 @@ impl<'a, 'gcx, 'tcx> Struct { for field in fields { if !self.sized { - bug!("Struct::compute: field #{} of `{}` comes after unsized field", + bug!("Struct::extend: field #{} of `{}` comes after unsized field", self.offset_after_field.len(), scapegoat); } @@ -555,7 +605,7 @@ impl<'a, 'gcx, 'tcx> Struct { } // Is this the NonZero lang item wrapping a pointer or integer type? - (&Univariant { non_zero: true, .. }, &ty::TyStruct(def, substs)) => { + (&Univariant { non_zero: true, .. }, &ty::TyAdt(def, substs)) => { let fields = &def.struct_variant().fields; assert_eq!(fields.len(), 1); match *fields[0].ty(tcx, substs).layout(infcx)? { @@ -573,7 +623,7 @@ impl<'a, 'gcx, 'tcx> Struct { // Perhaps one of the fields of this struct is non-zero // let's recurse and find out - (_, &ty::TyStruct(def, substs)) => { + (_, &ty::TyAdt(def, substs)) if def.is_struct() => { Struct::non_zero_field_path(infcx, def.struct_variant().fields .iter().map(|field| { field.ty(tcx, substs) @@ -621,6 +671,63 @@ impl<'a, 'gcx, 'tcx> Struct { } Ok(None) } + + pub fn offset_of_field(&self, index: usize) -> Size { + assert!(index < self.offset_after_field.len()); + if index == 0 { + Size::from_bytes(0) + } else { + self.offset_after_field[index-1] + } + } +} + +/// An untagged union. +#[derive(PartialEq, Eq, Hash, Debug)] +pub struct Union { + pub align: Align, + + pub min_size: Size, + + /// If true, no alignment padding is used. 
+ pub packed: bool, +} + +impl<'a, 'gcx, 'tcx> Union { + pub fn new(dl: &TargetDataLayout, packed: bool) -> Union { + Union { + align: if packed { dl.i8_align } else { dl.aggregate_align }, + min_size: Size::from_bytes(0), + packed: packed, + } + } + + /// Extend the Struct with more fields. + pub fn extend(&mut self, dl: &TargetDataLayout, + fields: I, + scapegoat: Ty<'gcx>) + -> Result<(), LayoutError<'gcx>> + where I: Iterator>> { + for (index, field) in fields.enumerate() { + let field = field?; + if field.is_unsized() { + bug!("Union::extend: field #{} of `{}` is unsized", + index, scapegoat); + } + + if !self.packed { + self.align = self.align.max(field.align(dl)); + } + self.min_size = cmp::max(self.min_size, field.size(dl)); + } + + Ok(()) + } + + /// Get the size with trailing aligment padding. + pub fn stride(&self) -> Size { + self.min_size.abi_align(self.align) + } } /// The first half of a fat pointer. @@ -646,7 +753,7 @@ pub enum Layout { non_zero: bool }, - /// SIMD vectors, from TyStruct marked with #[repr(simd)]. + /// SIMD vectors, from structs marked with #[repr(simd)]. Vector { element: Primitive, count: u64 @@ -667,7 +774,7 @@ pub enum Layout { non_zero: bool }, - // Remaining variants are all ADTs such as TyStruct, TyEnum or TyTuple. + // Remaining variants are all ADTs such as structs, enums or tuples. /// C-like enums; basically an integer. CEnum { @@ -690,6 +797,11 @@ pub enum Layout { non_zero: bool }, + /// Untagged unions. + UntaggedUnion { + variants: Union, + }, + /// General-case enums: for each case there is a struct, and they /// all start with a field for the discriminant. General { @@ -857,7 +969,7 @@ impl<'a, 'gcx, 'tcx> Layout { Univariant { variant: unit, non_zero: false } } - // Tuples. + // Tuples and closures. ty::TyClosure(_, ty::ClosureSubsts { upvar_tys: tys, .. }) | ty::TyTuple(tys) => { let mut st = Struct::new(dl, false); @@ -865,70 +977,41 @@ impl<'a, 'gcx, 'tcx> Layout { Univariant { variant: st, non_zero: false } } - // ADTs. - ty::TyStruct(def, substs) => { - if ty.is_simd() { - // SIMD vector types. - let element = ty.simd_type(tcx); - match *element.layout(infcx)? { - Scalar { value, .. } => { - return success(Vector { - element: value, - count: ty.simd_size(tcx) as u64 - }); - } - _ => { - tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \ - a non-machine element type `{}`", - ty, element)); - } + // SIMD vector types. + ty::TyAdt(def, ..) if def.is_simd() => { + let element = ty.simd_type(tcx); + match *element.layout(infcx)? { + Scalar { value, .. } => { + return success(Vector { + element: value, + count: ty.simd_size(tcx) as u64 + }); + } + _ => { + tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \ + a non-machine element type `{}`", + ty, element)); } - } - let fields = def.struct_variant().fields.iter().map(|field| { - field.ty(tcx, substs).layout(infcx) - }); - let packed = tcx.lookup_packed(def.did); - let mut st = Struct::new(dl, packed); - st.extend(dl, fields, ty)?; - - // FIXME(16758) don't add a drop flag to unsized structs, as it - // won't actually be in the location we say it is because it'll be after - // the unsized field. Several other pieces of code assume that the unsized - // field is definitely the last one. 
- if def.dtor_kind().has_drop_flag() && - ty.is_sized(tcx, &infcx.parameter_environment, DUMMY_SP) { - st.extend(dl, Some(Ok(&Scalar { - value: Int(I8), - non_zero: false - })).into_iter(), ty)?; - } - Univariant { - variant: st, - non_zero: Some(def.did) == tcx.lang_items.non_zero() } } - ty::TyEnum(def, substs) => { + + // ADTs. + ty::TyAdt(def, substs) => { let hint = *tcx.lookup_repr_hints(def.did).get(0) .unwrap_or(&attr::ReprAny); - let dtor = def.dtor_kind().has_drop_flag(); - let drop_flag = if dtor { - Some(Scalar { value: Int(I8), non_zero: false }) - } else { - None - }; - if def.variants.is_empty() { // Uninhabitable; represent as unit // (Typechecking will reject discriminant-sizing attrs.) assert_eq!(hint, attr::ReprAny); - let mut st = Struct::new(dl, false); - st.extend(dl, drop_flag.iter().map(Ok), ty)?; - return success(Univariant { variant: st, non_zero: false }); + return success(Univariant { + variant: Struct::new(dl, false), + non_zero: false + }); } - if !dtor && def.variants.iter().all(|v| v.fields.is_empty()) { + if def.is_enum() && def.variants.iter().all(|v| v.fields.is_empty()) { // All bodies empty -> intlike let (mut min, mut max) = (i64::MAX, i64::MIN); for v in &def.variants { @@ -937,7 +1020,7 @@ impl<'a, 'gcx, 'tcx> Layout { if x > max { max = x; } } - let (discr, signed) = Integer::repr_discr(tcx, hint, min, max); + let (discr, signed) = Integer::repr_discr(tcx, ty, hint, min, max); return success(CEnum { discr: discr, signed: signed, @@ -946,34 +1029,43 @@ impl<'a, 'gcx, 'tcx> Layout { }); } + if !def.is_enum() || def.variants.len() == 1 && hint == attr::ReprAny { + // Struct, or union, or univariant enum equivalent to a struct. + // (Typechecking will reject discriminant-sizing attrs.) + + let fields = def.variants[0].fields.iter().map(|field| { + field.ty(tcx, substs).layout(infcx) + }); + let packed = tcx.lookup_packed(def.did); + let layout = if def.is_union() { + let mut un = Union::new(dl, packed); + un.extend(dl, fields, ty)?; + UntaggedUnion { variants: un } + } else { + let mut st = Struct::new(dl, packed); + st.extend(dl, fields, ty)?; + let non_zero = Some(def.did) == tcx.lang_items.non_zero(); + Univariant { variant: st, non_zero: non_zero } + }; + return success(layout); + } + // Since there's at least one // non-empty body, explicit discriminants should have // been rejected by a checker before this point. for (i, v) in def.variants.iter().enumerate() { if i as u64 != v.disr_val.to_u64_unchecked() { bug!("non-C-like enum {} with specified discriminants", - tcx.item_path_str(def.did)); + tcx.item_path_str(def.did)); } } - if def.variants.len() == 1 { - // Equivalent to a struct/tuple/newtype. - // (Typechecking will reject discriminant-sizing attrs.) - assert_eq!(hint, attr::ReprAny); - let fields = def.variants[0].fields.iter().map(|field| { - field.ty(tcx, substs).layout(infcx) - }); - let mut st = Struct::new(dl, false); - st.extend(dl, fields.chain(drop_flag.iter().map(Ok)), ty)?; - return success(Univariant { variant: st, non_zero: false }); - } - // Cache the substituted and normalized variant field types. 
let variants = def.variants.iter().map(|v| { v.fields.iter().map(|field| field.ty(tcx, substs)).collect::>() }).collect::>(); - if !dtor && variants.len() == 2 && hint == attr::ReprAny { + if variants.len() == 2 && hint == attr::ReprAny { // Nullable pointer optimization for discr in 0..2 { let other_fields = variants[1 - discr].iter().map(|ty| { @@ -997,8 +1089,8 @@ impl<'a, 'gcx, 'tcx> Layout { } _ => { bug!("Layout::compute: `{}`'s non-zero \ - `{}` field not scalar?!", - ty, variants[discr][0]) + `{}` field not scalar?!", + ty, variants[discr][0]) } } } @@ -1018,7 +1110,7 @@ impl<'a, 'gcx, 'tcx> Layout { // The general case. let discr_max = (variants.len() - 1) as i64; assert!(discr_max >= 0); - let (min_ity, _) = Integer::repr_discr(tcx, hint, 0, discr_max); + let (min_ity, _) = Integer::repr_discr(tcx, ty, hint, 0, discr_max); let mut align = dl.aggregate_align; let mut size = Size::from_bytes(0); @@ -1045,8 +1137,7 @@ impl<'a, 'gcx, 'tcx> Layout { Ok(field) }); let mut st = Struct::new(dl, false); - st.extend(dl, discr.iter().map(Ok).chain(fields) - .chain(drop_flag.iter().map(Ok)), ty)?; + st.extend(dl, discr.iter().map(Ok).chain(fields), ty)?; size = cmp::max(size, st.min_size()); align = align.max(st.align); Ok(st) @@ -1069,20 +1160,7 @@ impl<'a, 'gcx, 'tcx> Layout { // won't be so conservative. // Use the initial field alignment - let wanted = start_align.abi(); - let mut ity = min_ity; - for &candidate in &[I16, I32, I64] { - let ty = Int(candidate); - if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() { - ity = candidate; - break; - } - } - - // FIXME(eddyb) conservative only to avoid diverging from trans::adt. - if align.abi() != start_align.abi() { - ity = min_ity; - } + let mut ity = Integer::for_abi_align(dl, start_align).unwrap_or(min_ity); // If the alignment is not larger than the chosen discriminant size, // don't use the alignment as the final size. @@ -1133,7 +1211,7 @@ impl<'a, 'gcx, 'tcx> Layout { pub fn is_unsized(&self) -> bool { match *self { Scalar {..} | Vector {..} | FatPointer {..} | - CEnum {..} | General {..} | + CEnum {..} | UntaggedUnion {..} | General {..} | RawNullablePointer {..} | StructWrappedNullablePointer {..} => false, @@ -1167,6 +1245,7 @@ impl<'a, 'gcx, 'tcx> Layout { CEnum { discr, .. } => Int(discr).size(dl), Array { size, .. } | General { size, .. } => size, + UntaggedUnion { ref variants } => variants.stride(), Univariant { ref variant, .. } | StructWrappedNullablePointer { nonnull: ref variant, .. } => { @@ -1206,6 +1285,7 @@ impl<'a, 'gcx, 'tcx> Layout { CEnum { discr, .. } => Int(discr).align(dl), Array { align, .. } | General { align, .. } => align, + UntaggedUnion { ref variants } => variants.align, Univariant { ref variant, .. } | StructWrappedNullablePointer { nonnull: ref variant, .. } => { @@ -1271,14 +1351,9 @@ impl<'a, 'gcx, 'tcx> SizeSkeleton<'gcx> { } } - ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => { + ty::TyAdt(def, substs) => { // Only newtypes and enums w/ nullable pointer optimization. - if def.variants.is_empty() || def.variants.len() > 2 { - return Err(err); - } - - // If there's a drop flag, it can't be just a pointer. 
- if def.dtor_kind().has_drop_flag() { + if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 { return Err(err); } diff --git a/src/librustc/ty/maps.rs b/src/librustc/ty/maps.rs index 57b1dd66be..3a552a8b43 100644 --- a/src/librustc/ty/maps.rs +++ b/src/librustc/ty/maps.rs @@ -10,7 +10,7 @@ use dep_graph::{DepNode, DepTrackingMapConfig}; use hir::def_id::DefId; -use ty; +use ty::{self, Ty}; use std::marker::PhantomData; use std::rc::Rc; use syntax::{attr, ast}; @@ -30,16 +30,16 @@ macro_rules! dep_map_ty { } dep_map_ty! { ImplOrTraitItems: ImplOrTraitItems(DefId) -> ty::ImplOrTraitItem<'tcx> } -dep_map_ty! { Tcache: ItemSignature(DefId) -> ty::TypeScheme<'tcx> } +dep_map_ty! { Tcache: ItemSignature(DefId) -> Ty<'tcx> } +dep_map_ty! { Generics: ItemSignature(DefId) -> &'tcx ty::Generics<'tcx> } dep_map_ty! { Predicates: ItemSignature(DefId) -> ty::GenericPredicates<'tcx> } dep_map_ty! { SuperPredicates: ItemSignature(DefId) -> ty::GenericPredicates<'tcx> } -dep_map_ty! { TraitItemDefIds: TraitItemDefIds(DefId) -> Rc> } +dep_map_ty! { ImplOrTraitItemDefIds: ImplOrTraitItemDefIds(DefId) -> Rc> } dep_map_ty! { ImplTraitRefs: ItemSignature(DefId) -> Option> } dep_map_ty! { TraitDefs: ItemSignature(DefId) -> &'tcx ty::TraitDef<'tcx> } dep_map_ty! { AdtDefs: ItemSignature(DefId) -> ty::AdtDefMaster<'tcx> } -dep_map_ty! { ItemVariances: ItemSignature(DefId) -> Rc } -dep_map_ty! { InherentImpls: InherentImpls(DefId) -> Rc> } -dep_map_ty! { ImplItems: ImplItems(DefId) -> Vec } +dep_map_ty! { ItemVariances: ItemSignature(DefId) -> Rc> } +dep_map_ty! { InherentImpls: InherentImpls(DefId) -> Vec } dep_map_ty! { TraitItems: TraitItems(DefId) -> Rc>> } dep_map_ty! { ReprHints: ReprHints(DefId) -> Rc> } dep_map_ty! { InlinedClosures: Hir(DefId) -> ast::NodeId } diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index 02be1573bf..d38a30ee63 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-pub use self::ImplOrTraitItemId::*; pub use self::Variance::*; pub use self::DtorKind::*; pub use self::ImplOrTraitItemContainer::*; @@ -21,44 +20,45 @@ pub use self::fold::TypeFoldable; use dep_graph::{self, DepNode}; use hir::map as ast_map; use middle; -use middle::cstore::{self, LOCAL_CRATE}; use hir::def::{Def, PathResolution, ExportMap}; -use hir::def_id::DefId; +use hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE}; use middle::lang_items::{FnTraitLangItem, FnMutTraitLangItem, FnOnceTraitLangItem}; use middle::region::{CodeExtent, ROOT_CODE_EXTENT}; use traits; use ty; -use ty::subst::{Subst, Substs, VecPerParamSpace}; +use ty::subst::{Subst, Substs}; use ty::walk::TypeWalker; use util::common::MemoizationMap; use util::nodemap::NodeSet; use util::nodemap::FnvHashMap; -use serialize::{Encodable, Encoder, Decodable, Decoder}; +use serialize::{self, Encodable, Encoder}; use std::borrow::Cow; -use std::cell::Cell; +use std::cell::{Cell, RefCell}; use std::hash::{Hash, Hasher}; use std::iter; +use std::ops::Deref; use std::rc::Rc; use std::slice; use std::vec::IntoIter; -use syntax::ast::{self, CrateNum, Name, NodeId}; -use syntax::attr::{self, AttrMetaMethods}; -use syntax::parse::token::InternedString; +use syntax::ast::{self, Name, NodeId}; +use syntax::attr; +use syntax::parse::token::{self, InternedString}; use syntax_pos::{DUMMY_SP, Span}; use rustc_const_math::ConstInt; use hir; -use hir::{ItemImpl, ItemTrait, PatKind}; use hir::intravisit::Visitor; pub use self::sty::{Binder, DebruijnIndex}; -pub use self::sty::{BuiltinBound, BuiltinBounds, ExistentialBounds}; +pub use self::sty::{BuiltinBound, BuiltinBounds}; pub use self::sty::{BareFnTy, FnSig, PolyFnSig}; -pub use self::sty::{ClosureTy, InferTy, ParamTy, ProjectionTy, TraitTy}; +pub use self::sty::{ClosureTy, InferTy, ParamTy, ProjectionTy, TraitObject}; pub use self::sty::{ClosureSubsts, TypeAndMut}; pub use self::sty::{TraitRef, TypeVariants, PolyTraitRef}; +pub use self::sty::{ExistentialTraitRef, PolyExistentialTraitRef}; +pub use self::sty::{ExistentialProjection, PolyExistentialProjection}; pub use self::sty::{BoundRegion, EarlyBoundRegion, FreeRegion, Region}; pub use self::sty::Issue32330; pub use self::sty::{TyVid, IntVid, FloatVid, RegionVid, SkolemizedRegionVid}; @@ -120,23 +120,16 @@ pub struct CrateAnalysis<'a> { #[derive(Copy, Clone)] pub enum DtorKind { NoDtor, - TraitDtor(bool) + TraitDtor } impl DtorKind { pub fn is_present(&self) -> bool { match *self { - TraitDtor(..) 
=> true, + TraitDtor => true, _ => false } } - - pub fn has_drop_flag(&self) -> bool { - match self { - &NoDtor => false, - &TraitDtor(flag) => flag - } - } } #[derive(Clone, Copy, PartialEq, Eq, Debug)] @@ -171,15 +164,14 @@ impl<'a, 'gcx, 'tcx> ImplHeader<'tcx> { -> ImplHeader<'tcx> { let tcx = selcx.tcx(); - let impl_generics = tcx.lookup_item_type(impl_def_id).generics; - let impl_substs = selcx.infcx().fresh_substs_for_generics(DUMMY_SP, &impl_generics); + let impl_substs = selcx.infcx().fresh_substs_for_item(DUMMY_SP, impl_def_id); let header = ImplHeader { impl_def_id: impl_def_id, self_ty: tcx.lookup_item_type(impl_def_id).ty, trait_ref: tcx.impl_trait_ref(impl_def_id), - predicates: tcx.lookup_predicates(impl_def_id).predicates.into_vec(), - }.subst(tcx, &impl_substs); + predicates: tcx.lookup_predicates(impl_def_id).predicates + }.subst(tcx, impl_substs); let traits::Normalized { value: mut header, obligations } = traits::normalize(selcx, traits::ObligationCause::dummy(), &header); @@ -197,23 +189,11 @@ pub enum ImplOrTraitItem<'tcx> { } impl<'tcx> ImplOrTraitItem<'tcx> { - fn id(&self) -> ImplOrTraitItemId { - match *self { - ConstTraitItem(ref associated_const) => { - ConstTraitItemId(associated_const.def_id) - } - MethodTraitItem(ref method) => MethodTraitItemId(method.def_id), - TypeTraitItem(ref associated_type) => { - TypeTraitItemId(associated_type.def_id) - } - } - } - pub fn def(&self) -> Def { match *self { ConstTraitItem(ref associated_const) => Def::AssociatedConst(associated_const.def_id), MethodTraitItem(ref method) => Def::Method(method.def_id), - TypeTraitItem(ref ty) => Def::AssociatedTy(ty.container.id(), ty.def_id), + TypeTraitItem(ref ty) => Def::AssociatedTy(ty.def_id), } } @@ -257,24 +237,7 @@ impl<'tcx> ImplOrTraitItem<'tcx> { } } -#[derive(Clone, Copy, Debug)] -pub enum ImplOrTraitItemId { - ConstTraitItemId(DefId), - MethodTraitItemId(DefId), - TypeTraitItemId(DefId), -} - -impl ImplOrTraitItemId { - pub fn def_id(&self) -> DefId { - match *self { - ConstTraitItemId(def_id) => def_id, - MethodTraitItemId(def_id) => def_id, - TypeTraitItemId(def_id) => def_id, - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Copy)] +#[derive(Clone, Debug, PartialEq, Eq, Copy, RustcEncodable, RustcDecodable)] pub enum Visibility { /// Visible everywhere (including in other crates). 
Public, @@ -346,40 +309,18 @@ impl Visibility { #[derive(Clone, Debug)] pub struct Method<'tcx> { pub name: Name, - pub generics: Generics<'tcx>, + pub generics: &'tcx Generics<'tcx>, pub predicates: GenericPredicates<'tcx>, pub fty: &'tcx BareFnTy<'tcx>, - pub explicit_self: ExplicitSelfCategory, + pub explicit_self: ExplicitSelfCategory<'tcx>, pub vis: Visibility, pub defaultness: hir::Defaultness, + pub has_body: bool, pub def_id: DefId, pub container: ImplOrTraitItemContainer, } impl<'tcx> Method<'tcx> { - pub fn new(name: Name, - generics: ty::Generics<'tcx>, - predicates: GenericPredicates<'tcx>, - fty: &'tcx BareFnTy<'tcx>, - explicit_self: ExplicitSelfCategory, - vis: Visibility, - defaultness: hir::Defaultness, - def_id: DefId, - container: ImplOrTraitItemContainer) - -> Method<'tcx> { - Method { - name: name, - generics: generics, - predicates: predicates, - fty: fty, - explicit_self: explicit_self, - vis: vis, - defaultness: defaultness, - def_id: def_id, - container: container, - } - } - pub fn container_id(&self) -> DefId { match self.container { TraitContainer(id) => id, @@ -423,12 +364,6 @@ pub struct AssociatedType<'tcx> { pub container: ImplOrTraitItemContainer, } -#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable)] -pub struct ItemVariances { - pub types: VecPerParamSpace, - pub regions: VecPerParamSpace, -} - #[derive(Clone, PartialEq, RustcDecodable, RustcEncodable, Copy)] pub enum Variance { Covariant, // T <: T iff A <: B -- e.g., function return type @@ -437,12 +372,12 @@ pub enum Variance { Bivariant, // T <: T -- e.g., unused type parameter } -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, RustcDecodable, RustcEncodable)] pub struct MethodCallee<'tcx> { /// Impl method ID, for inherent methods, or trait method ID, otherwise. pub def_id: DefId, pub ty: Ty<'tcx>, - pub substs: &'tcx subst::Substs<'tcx> + pub substs: &'tcx Substs<'tcx> } /// With method calls, we store some extra information in @@ -583,23 +518,47 @@ impl<'tcx> Hash for TyS<'tcx> { pub type Ty<'tcx> = &'tcx TyS<'tcx>; -impl<'tcx> Encodable for Ty<'tcx> { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { - cstore::tls::with_encoding_context(s, |ecx, rbml_w| { - ecx.encode_ty(rbml_w, *self); - Ok(()) - }) +impl<'tcx> serialize::UseSpecializedEncodable for Ty<'tcx> {} +impl<'tcx> serialize::UseSpecializedDecodable for Ty<'tcx> {} + +/// A wrapper for slices with the additioanl invariant +/// that the slice is interned and no other slice with +/// the same contents can exist in the same context. +/// This means we can use pointer + length for both +/// equality comparisons and hashing. +#[derive(Debug, RustcEncodable)] +pub struct Slice([T]); + +impl PartialEq for Slice { + #[inline] + fn eq(&self, other: &Slice) -> bool { + (&self.0 as *const [T]) == (&other.0 as *const [T]) } } +impl Eq for Slice {} -impl<'tcx> Decodable for Ty<'tcx> { - fn decode(d: &mut D) -> Result, D::Error> { - cstore::tls::with_decoding_context(d, |dcx, rbml_r| { - Ok(dcx.decode_ty(rbml_r)) - }) +impl Hash for Slice { + fn hash(&self, s: &mut H) { + (self.as_ptr(), self.len()).hash(s) + } +} + +impl Deref for Slice { + type Target = [T]; + fn deref(&self) -> &[T] { + &self.0 + } +} + +impl<'a, T> IntoIterator for &'a Slice { + type Item = &'a T; + type IntoIter = <&'a [T] as IntoIterator>::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self[..].iter() } } +impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Slice> {} /// Upvars do not get their own node-id. 
Instead, we use the pair of /// the original var id (that is, the root variable that is referenced @@ -658,29 +617,29 @@ pub enum BorrowKind { /// Information describing the capture of an upvar. This is computed /// during `typeck`, specifically by `regionck`. -#[derive(PartialEq, Clone, Debug, Copy)] -pub enum UpvarCapture { +#[derive(PartialEq, Clone, Debug, Copy, RustcEncodable, RustcDecodable)] +pub enum UpvarCapture<'tcx> { /// Upvar is captured by value. This is always true when the /// closure is labeled `move`, but can also be true in other cases /// depending on inference. ByValue, /// Upvar is captured by reference. - ByRef(UpvarBorrow), + ByRef(UpvarBorrow<'tcx>), } -#[derive(PartialEq, Clone, Copy)] -pub struct UpvarBorrow { +#[derive(PartialEq, Clone, Copy, RustcEncodable, RustcDecodable)] +pub struct UpvarBorrow<'tcx> { /// The kind of borrow: by-ref upvars have access to shared /// immutable borrows, which are not part of the normal language /// syntax. pub kind: BorrowKind, /// Region of the resulting reference. - pub region: ty::Region, + pub region: &'tcx ty::Region, } -pub type UpvarCaptureMap = FnvHashMap; +pub type UpvarCaptureMap<'tcx> = FnvHashMap>; #[derive(Copy, Clone)] pub struct ClosureUpvar<'tcx> { @@ -700,8 +659,8 @@ pub enum IntVarValue { /// from `T:'a` annotations appearing in the type definition. If /// this is `None`, then the default is inherited from the /// surrounding context. See RFC #599 for details. -#[derive(Copy, Clone)] -pub enum ObjectLifetimeDefault { +#[derive(Copy, Clone, RustcEncodable, RustcDecodable)] +pub enum ObjectLifetimeDefault<'tcx> { /// Require an explicit annotation. Occurs when multiple /// `T:'a` constraints are found. Ambiguous, @@ -710,37 +669,39 @@ pub enum ObjectLifetimeDefault { BaseDefault, /// Use the given region as the default. - Specific(Region), + Specific(&'tcx Region), } -#[derive(Clone)] +#[derive(Clone, RustcEncodable, RustcDecodable)] pub struct TypeParameterDef<'tcx> { pub name: Name, pub def_id: DefId, - pub space: subst::ParamSpace, pub index: u32, pub default_def_id: DefId, // for use in error reporing about defaults pub default: Option>, - pub object_lifetime_default: ObjectLifetimeDefault, + pub object_lifetime_default: ObjectLifetimeDefault<'tcx>, } -#[derive(Clone)] -pub struct RegionParameterDef { +#[derive(Clone, RustcEncodable, RustcDecodable)] +pub struct RegionParameterDef<'tcx> { pub name: Name, pub def_id: DefId, - pub space: subst::ParamSpace, pub index: u32, - pub bounds: Vec, + pub bounds: Vec<&'tcx ty::Region>, } -impl RegionParameterDef { +impl<'tcx> RegionParameterDef<'tcx> { pub fn to_early_bound_region(&self) -> ty::Region { - ty::ReEarlyBound(ty::EarlyBoundRegion { - space: self.space, + ty::ReEarlyBound(self.to_early_bound_region_data()) + } + + pub fn to_early_bound_region_data(&self) -> ty::EarlyBoundRegion { + ty::EarlyBoundRegion { index: self.index, name: self.name, - }) + } } + pub fn to_bound_region(&self) -> ty::BoundRegion { // this is an early bound region, so unaffected by #32330 ty::BoundRegion::BrNamed(self.def_id, self.name, Issue32330::WontChange) @@ -749,80 +710,88 @@ impl RegionParameterDef { /// Information about the formal type/lifetime parameters associated /// with an item or method. Analogous to hir::Generics. 
-#[derive(Clone, Debug)] +#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] pub struct Generics<'tcx> { - pub types: VecPerParamSpace>, - pub regions: VecPerParamSpace, + pub parent: Option, + pub parent_regions: u32, + pub parent_types: u32, + pub regions: Vec>, + pub types: Vec>, + pub has_self: bool, } impl<'tcx> Generics<'tcx> { - pub fn empty() -> Generics<'tcx> { - Generics { - types: VecPerParamSpace::empty(), - regions: VecPerParamSpace::empty(), - } + pub fn parent_count(&self) -> usize { + self.parent_regions as usize + self.parent_types as usize } - pub fn is_empty(&self) -> bool { - self.types.is_empty() && self.regions.is_empty() + pub fn own_count(&self) -> usize { + self.regions.len() + self.types.len() } - pub fn has_type_params(&self, space: subst::ParamSpace) -> bool { - !self.types.is_empty_in(space) - } - - pub fn has_region_params(&self, space: subst::ParamSpace) -> bool { - !self.regions.is_empty_in(space) + pub fn count(&self) -> usize { + self.parent_count() + self.own_count() } } /// Bounds on generics. #[derive(Clone)] pub struct GenericPredicates<'tcx> { - pub predicates: VecPerParamSpace>, + pub parent: Option, + pub predicates: Vec>, } -impl<'a, 'gcx, 'tcx> GenericPredicates<'tcx> { - pub fn empty() -> GenericPredicates<'tcx> { - GenericPredicates { - predicates: VecPerParamSpace::empty(), - } - } +impl<'tcx> serialize::UseSpecializedEncodable for GenericPredicates<'tcx> {} +impl<'tcx> serialize::UseSpecializedDecodable for GenericPredicates<'tcx> {} +impl<'a, 'gcx, 'tcx> GenericPredicates<'tcx> { pub fn instantiate(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, substs: &Substs<'tcx>) -> InstantiatedPredicates<'tcx> { + let mut instantiated = InstantiatedPredicates::empty(); + self.instantiate_into(tcx, &mut instantiated, substs); + instantiated + } + pub fn instantiate_own(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, substs: &Substs<'tcx>) + -> InstantiatedPredicates<'tcx> { InstantiatedPredicates { - predicates: self.predicates.subst(tcx, substs), + predicates: self.predicates.subst(tcx, substs) } } + fn instantiate_into(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + instantiated: &mut InstantiatedPredicates<'tcx>, + substs: &Substs<'tcx>) { + if let Some(def_id) = self.parent { + tcx.lookup_predicates(def_id).instantiate_into(tcx, instantiated, substs); + } + instantiated.predicates.extend(self.predicates.iter().map(|p| p.subst(tcx, substs))) + } + pub fn instantiate_supertrait(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, poly_trait_ref: &ty::PolyTraitRef<'tcx>) -> InstantiatedPredicates<'tcx> { + assert_eq!(self.parent, None); InstantiatedPredicates { - predicates: self.predicates.map(|pred| { + predicates: self.predicates.iter().map(|pred| { pred.subst_supertrait(tcx, poly_trait_ref) - }) + }).collect() } } } -#[derive(Clone, PartialEq, Eq, Hash)] +#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub enum Predicate<'tcx> { /// Corresponds to `where Foo : Bar`. `Foo` here would be /// the `Self` type of the trait reference and `A`, `B`, and `C` - /// would be the parameters in the `TypeSpace`. + /// would be the type parameters. Trait(PolyTraitPredicate<'tcx>), - /// A predicate created by RFC1592 - Rfc1592(Box>), - /// where `T1 == T2`. Equate(PolyEquatePredicate<'tcx>), /// where 'a : 'b - RegionOutlives(PolyRegionOutlivesPredicate), + RegionOutlives(PolyRegionOutlivesPredicate<'tcx>), /// where T : 'a TypeOutlives(PolyTypeOutlivesPredicate<'tcx>), @@ -837,9 +806,9 @@ pub enum Predicate<'tcx> { /// trait must be object-safe ObjectSafe(DefId), - /// No direct syntax. 
May be thought of as `where T : FnFoo<...>` for some 'TypeSpace' - /// substitutions `...` and T being a closure type. Satisfied (or refuted) once we know the - /// closure's kind. + /// No direct syntax. May be thought of as `where T : FnFoo<...>` + /// for some substitutions `...` and T being a closure type. + /// Satisfied (or refuted) once we know the closure's kind. ClosureKind(DefId, ClosureKind), } @@ -917,8 +886,6 @@ impl<'a, 'gcx, 'tcx> Predicate<'tcx> { match *self { Predicate::Trait(ty::Binder(ref data)) => Predicate::Trait(ty::Binder(data.subst(tcx, substs))), - Predicate::Rfc1592(ref pi) => - Predicate::Rfc1592(Box::new(pi.subst_supertrait(tcx, trait_ref))), Predicate::Equate(ty::Binder(ref data)) => Predicate::Equate(ty::Binder(data.subst(tcx, substs))), Predicate::RegionOutlives(ty::Binder(ref data)) => @@ -937,7 +904,7 @@ impl<'a, 'gcx, 'tcx> Predicate<'tcx> { } } -#[derive(Clone, PartialEq, Eq, Hash)] +#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct TraitPredicate<'tcx> { pub trait_ref: TraitRef<'tcx> } @@ -961,21 +928,20 @@ impl<'tcx> TraitPredicate<'tcx> { // leads to more recompilation. let def_ids: Vec<_> = self.input_types() - .iter() .flat_map(|t| t.walk()) .filter_map(|t| match t.sty { - ty::TyStruct(adt_def, _) | - ty::TyEnum(adt_def, _) => + ty::TyAdt(adt_def, _) => Some(adt_def.did), _ => None }) + .chain(iter::once(self.def_id())) .collect(); - DepNode::TraitSelect(self.def_id(), def_ids) + DepNode::TraitSelect(def_ids) } - pub fn input_types(&self) -> &[Ty<'tcx>] { - self.trait_ref.substs.types.as_slice() + pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator> + 'a { + self.trait_ref.input_types() } pub fn self_ty(&self) -> Ty<'tcx> { @@ -995,15 +961,16 @@ impl<'tcx> PolyTraitPredicate<'tcx> { } } -#[derive(Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct EquatePredicate<'tcx>(pub Ty<'tcx>, pub Ty<'tcx>); // `0 == 1` pub type PolyEquatePredicate<'tcx> = ty::Binder>; -#[derive(Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct OutlivesPredicate(pub A, pub B); // `A : B` pub type PolyOutlivesPredicate = ty::Binder>; -pub type PolyRegionOutlivesPredicate = PolyOutlivesPredicate; -pub type PolyTypeOutlivesPredicate<'tcx> = PolyOutlivesPredicate, ty::Region>; +pub type PolyRegionOutlivesPredicate<'tcx> = PolyOutlivesPredicate<&'tcx ty::Region, + &'tcx ty::Region>; +pub type PolyTypeOutlivesPredicate<'tcx> = PolyOutlivesPredicate, &'tcx ty::Region>; /// This kind of predicate has no *direct* correspondent in the /// syntax, but it roughly corresponds to the syntactic forms: @@ -1017,7 +984,7 @@ pub type PolyTypeOutlivesPredicate<'tcx> = PolyOutlivesPredicate, ty::R /// equality between arbitrary types. Processing an instance of Form /// #2 eventually yields one of these `ProjectionPredicate` /// instances to normalize the LHS. 
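The two forms described in the comment above can be made concrete with a small, self-contained sketch (illustrative code only, not taken from the compiler): the associated-type binding in the where-clause is Form #1, while equating a fully qualified projection such as `<I as Iterator>::Item` with a concrete type is what Form #2 captures internally.

// Form #1: `I: Iterator<Item = u64>` constrains the projection directly.
// Form #2 has no direct surface syntax, but using the fully qualified
// projection `<I as Iterator>::Item` below relies on the same predicate
// machinery to equate it with `u64`.
fn first_plus_one<I>(mut it: I) -> Option<u64>
where
    I: Iterator<Item = u64>,
{
    // The projection <I as Iterator>::Item is known to equal u64 here.
    let x: <I as Iterator>::Item = match it.next() {
        Some(v) => v,
        None => return None,
    };
    Some(x + 1)
}

fn main() {
    assert_eq!(first_plus_one(vec![41u64, 7].into_iter()), Some(42));
}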
-#[derive(Copy, Clone, PartialEq, Eq, Hash)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct ProjectionPredicate<'tcx> { pub projection_ty: ProjectionTy<'tcx>, pub ty: Ty<'tcx>, @@ -1029,10 +996,6 @@ impl<'tcx> PolyProjectionPredicate<'tcx> { pub fn item_name(&self) -> Name { self.0.projection_ty.item_name // safe to skip the binder to access a name } - - pub fn sort_key(&self) -> (DefId, Name) { - self.0.projection_ty.sort_key() - } } pub trait ToPolyTraitRef<'tcx> { @@ -1092,7 +1055,7 @@ impl<'tcx> ToPredicate<'tcx> for PolyEquatePredicate<'tcx> { } } -impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate { +impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate<'tcx> { fn to_predicate(&self) -> Predicate<'tcx> { Predicate::RegionOutlives(self.clone()) } @@ -1117,10 +1080,7 @@ impl<'tcx> Predicate<'tcx> { pub fn walk_tys(&self) -> IntoIter> { let vec: Vec<_> = match *self { ty::Predicate::Trait(ref data) => { - data.0.trait_ref.substs.types.as_slice().to_vec() - } - ty::Predicate::Rfc1592(ref data) => { - return data.walk_tys() + data.skip_binder().input_types().collect() } ty::Predicate::Equate(ty::Binder(ref data)) => { vec![data.0, data.1] @@ -1132,11 +1092,8 @@ impl<'tcx> Predicate<'tcx> { vec![] } ty::Predicate::Projection(ref data) => { - let trait_inputs = data.0.projection_ty.trait_ref.substs.types.as_slice(); - trait_inputs.iter() - .cloned() - .chain(Some(data.0.ty)) - .collect() + let trait_inputs = data.0.projection_ty.trait_ref.input_types(); + trait_inputs.chain(Some(data.0.ty)).collect() } ty::Predicate::WellFormed(data) => { vec![data] @@ -1162,7 +1119,6 @@ impl<'tcx> Predicate<'tcx> { Predicate::Trait(ref t) => { Some(t.to_poly_trait_ref()) } - Predicate::Rfc1592(..) | Predicate::Projection(..) | Predicate::Equate(..) | Predicate::RegionOutlives(..) | @@ -1197,12 +1153,12 @@ impl<'tcx> Predicate<'tcx> { /// [usize:Bar]]`. #[derive(Clone)] pub struct InstantiatedPredicates<'tcx> { - pub predicates: VecPerParamSpace>, + pub predicates: Vec>, } impl<'tcx> InstantiatedPredicates<'tcx> { pub fn empty() -> InstantiatedPredicates<'tcx> { - InstantiatedPredicates { predicates: VecPerParamSpace::empty() } + InstantiatedPredicates { predicates: vec![] } } pub fn is_empty(&self) -> bool { @@ -1216,15 +1172,15 @@ impl<'tcx> TraitRef<'tcx> { } pub fn self_ty(&self) -> Ty<'tcx> { - self.substs.self_ty().unwrap() + self.substs.type_at(0) } - pub fn input_types(&self) -> &[Ty<'tcx>] { + pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator> + 'a { // Select only the "input types" from a trait-reference. For // now this is all the types that appear in the // trait-reference, but it should eventually exclude // associated types. - self.substs.types.as_slice() + self.substs.types() } } @@ -1249,7 +1205,7 @@ pub struct ParameterEnvironment<'tcx> { /// indicates it must outlive at least the function body (the user /// may specify stronger requirements). This field indicates the /// region of the callee. - pub implicit_region_bound: ty::Region, + pub implicit_region_bound: &'tcx ty::Region, /// Obligations that the caller must satisfy. This is basically /// the set of bounds on the in-scope type parameters, translated @@ -1264,6 +1220,12 @@ pub struct ParameterEnvironment<'tcx> { /// regions don't have this implicit scope and instead introduce /// relationships in the environment. pub free_id_outlive: CodeExtent, + + /// A cache for `moves_by_default`. 
+ pub is_copy_cache: RefCell, bool>>, + + /// A cache for `type_is_sized` + pub is_sized_cache: RefCell, bool>>, } impl<'a, 'tcx> ParameterEnvironment<'tcx> { @@ -1276,6 +1238,8 @@ impl<'a, 'tcx> ParameterEnvironment<'tcx> { implicit_region_bound: self.implicit_region_bound, caller_bounds: caller_bounds, free_id_outlive: self.free_id_outlive, + is_copy_cache: RefCell::new(FnvHashMap()), + is_sized_cache: RefCell::new(FnvHashMap()), } } @@ -1285,28 +1249,22 @@ impl<'a, 'tcx> ParameterEnvironment<'tcx> { match tcx.map.find(id) { Some(ast_map::NodeImplItem(ref impl_item)) => { match impl_item.node { - hir::ImplItemKind::Type(_) | hir::ImplItemKind::Const(_, _) => { + hir::ImplItemKind::Type(_) | hir::ImplItemKind::Const(..) => { // associated types don't have their own entry (for some reason), // so for now just grab environment for the impl let impl_id = tcx.map.get_parent(id); let impl_def_id = tcx.map.local_def_id(impl_id); - let scheme = tcx.lookup_item_type(impl_def_id); - let predicates = tcx.lookup_predicates(impl_def_id); tcx.construct_parameter_environment(impl_item.span, - &scheme.generics, - &predicates, + impl_def_id, tcx.region_maps.item_extent(id)) } hir::ImplItemKind::Method(_, ref body) => { let method_def_id = tcx.map.local_def_id(id); match tcx.impl_or_trait_item(method_def_id) { MethodTraitItem(ref method_ty) => { - let method_generics = &method_ty.generics; - let method_bounds = &method_ty.predicates; tcx.construct_parameter_environment( impl_item.span, - method_generics, - method_bounds, + method_ty.def_id, tcx.region_maps.call_site_extent(id, body.id)) } _ => { @@ -1324,11 +1282,8 @@ impl<'a, 'tcx> ParameterEnvironment<'tcx> { // so for now just grab environment for the trait let trait_id = tcx.map.get_parent(id); let trait_def_id = tcx.map.local_def_id(trait_id); - let trait_def = tcx.lookup_trait_def(trait_def_id); - let predicates = tcx.lookup_predicates(trait_def_id); tcx.construct_parameter_environment(trait_item.span, - &trait_def.generics, - &predicates, + trait_def_id, tcx.region_maps.item_extent(id)) } hir::MethodTraitItem(_, ref body) => { @@ -1338,8 +1293,6 @@ impl<'a, 'tcx> ParameterEnvironment<'tcx> { let method_def_id = tcx.map.local_def_id(id); match tcx.impl_or_trait_item(method_def_id) { MethodTraitItem(ref method_ty) => { - let method_generics = &method_ty.generics; - let method_bounds = &method_ty.predicates; let extent = if let Some(ref body) = *body { // default impl: use call_site extent as free_id_outlive bound. tcx.region_maps.call_site_extent(id, body.id) @@ -1349,8 +1302,7 @@ impl<'a, 'tcx> ParameterEnvironment<'tcx> { }; tcx.construct_parameter_environment( trait_item.span, - method_generics, - method_bounds, + method_ty.def_id, extent) } _ => { @@ -1364,39 +1316,31 @@ impl<'a, 'tcx> ParameterEnvironment<'tcx> { } Some(ast_map::NodeItem(item)) => { match item.node { - hir::ItemFn(_, _, _, _, _, ref body) => { + hir::ItemFn(.., ref body) => { // We assume this is a function. let fn_def_id = tcx.map.local_def_id(id); - let fn_scheme = tcx.lookup_item_type(fn_def_id); - let fn_predicates = tcx.lookup_predicates(fn_def_id); tcx.construct_parameter_environment( item.span, - &fn_scheme.generics, - &fn_predicates, + fn_def_id, tcx.region_maps.call_site_extent(id, body.id)) } hir::ItemEnum(..) | hir::ItemStruct(..) | + hir::ItemUnion(..) | hir::ItemTy(..) | hir::ItemImpl(..) | hir::ItemConst(..) | hir::ItemStatic(..) 
=> { let def_id = tcx.map.local_def_id(id); - let scheme = tcx.lookup_item_type(def_id); - let predicates = tcx.lookup_predicates(def_id); tcx.construct_parameter_environment(item.span, - &scheme.generics, - &predicates, + def_id, tcx.region_maps.item_extent(id)) } hir::ItemTrait(..) => { let def_id = tcx.map.local_def_id(id); - let trait_def = tcx.lookup_trait_def(def_id); - let predicates = tcx.lookup_predicates(def_id); tcx.construct_parameter_environment(item.span, - &trait_def.generics, - &predicates, + def_id, tcx.region_maps.item_extent(id)) } _ => { @@ -1417,11 +1361,8 @@ impl<'a, 'tcx> ParameterEnvironment<'tcx> { } Some(ast_map::NodeForeignItem(item)) => { let def_id = tcx.map.local_def_id(id); - let scheme = tcx.lookup_item_type(def_id); - let predicates = tcx.lookup_predicates(def_id); tcx.construct_parameter_environment(item.span, - &scheme.generics, - &predicates, + def_id, ROOT_CODE_EXTENT) } _ => { @@ -1454,7 +1395,7 @@ impl<'a, 'tcx> ParameterEnvironment<'tcx> { /// `lookup_predicates`. #[derive(Clone, Debug)] pub struct TypeScheme<'tcx> { - pub generics: Generics<'tcx>, + pub generics: &'tcx Generics<'tcx>, pub ty: Ty<'tcx>, } @@ -1467,7 +1408,7 @@ bitflags! { const IS_PHANTOM_DATA = 1 << 3, const IS_SIMD = 1 << 4, const IS_FUNDAMENTAL = 1 << 5, - const IS_NO_DROP_FLAG = 1 << 6, + const IS_UNION = 1 << 6, } } @@ -1541,26 +1482,16 @@ impl<'tcx, 'container> Hash for AdtDefData<'tcx, 'container> { } } -impl<'tcx> Encodable for AdtDef<'tcx> { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { +impl<'tcx> serialize::UseSpecializedEncodable for AdtDef<'tcx> { + fn default_encode(&self, s: &mut S) -> Result<(), S::Error> { self.did.encode(s) } } -impl<'tcx> Decodable for AdtDef<'tcx> { - fn decode(d: &mut D) -> Result, D::Error> { - let def_id: DefId = Decodable::decode(d)?; - - cstore::tls::with_decoding_context(d, |dcx, _| { - let def_id = dcx.translate_def_id(def_id); - Ok(dcx.tcx().lookup_adt_def(def_id)) - }) - } -} - +impl<'tcx> serialize::UseSpecializedDecodable for AdtDef<'tcx> {} #[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub enum AdtKind { Struct, Enum } +pub enum AdtKind { Struct, Union, Enum } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] pub enum VariantKind { Struct, Tuple, Unit } @@ -1585,17 +1516,16 @@ impl<'a, 'gcx, 'tcx, 'container> AdtDefData<'gcx, 'container> { if attr::contains_name(&attrs, "fundamental") { flags = flags | AdtFlags::IS_FUNDAMENTAL; } - if attr::contains_name(&attrs, "unsafe_no_drop_flag") { - flags = flags | AdtFlags::IS_NO_DROP_FLAG; - } if tcx.lookup_simd(did) { flags = flags | AdtFlags::IS_SIMD; } if Some(did) == tcx.lang_items.phantom_data() { flags = flags | AdtFlags::IS_PHANTOM_DATA; } - if let AdtKind::Enum = kind { - flags = flags | AdtFlags::IS_ENUM; + match kind { + AdtKind::Enum => flags = flags | AdtFlags::IS_ENUM, + AdtKind::Union => flags = flags | AdtFlags::IS_UNION, + AdtKind::Struct => {} } AdtDefData { did: did, @@ -1613,16 +1543,49 @@ impl<'a, 'gcx, 'tcx, 'container> AdtDefData<'gcx, 'container> { self.flags.set(self.flags.get() | AdtFlags::IS_DTORCK_VALID) } + #[inline] + pub fn is_struct(&self) -> bool { + !self.is_union() && !self.is_enum() + } + + #[inline] + pub fn is_union(&self) -> bool { + self.flags.get().intersects(AdtFlags::IS_UNION) + } + + #[inline] + pub fn is_enum(&self) -> bool { + self.flags.get().intersects(AdtFlags::IS_ENUM) + } + /// Returns the kind of the ADT - Struct or Enum. 
#[inline] pub fn adt_kind(&self) -> AdtKind { - if self.flags.get().intersects(AdtFlags::IS_ENUM) { + if self.is_enum() { AdtKind::Enum + } else if self.is_union() { + AdtKind::Union } else { AdtKind::Struct } } + pub fn descr(&self) -> &'static str { + match self.adt_kind() { + AdtKind::Struct => "struct", + AdtKind::Union => "union", + AdtKind::Enum => "enum", + } + } + + pub fn variant_descr(&self) -> &'static str { + match self.adt_kind() { + AdtKind::Struct => "struct", + AdtKind::Union => "union", + AdtKind::Enum => "variant", + } + } + /// Returns whether this is a dtorck type. If this returns /// true, this type being safe for destruction requires it to be /// alive; Otherwise, only the contents are required to be. @@ -1654,16 +1617,13 @@ impl<'a, 'gcx, 'tcx, 'container> AdtDefData<'gcx, 'container> { /// Returns whether this type has a destructor. pub fn has_dtor(&self) -> bool { - match self.dtor_kind() { - NoDtor => false, - TraitDtor(..) => true - } + self.dtor_kind().is_present() } /// Asserts this is a struct and returns the struct's unique /// variant. pub fn struct_variant(&self) -> &VariantDefData<'gcx, 'container> { - assert_eq!(self.adt_kind(), AdtKind::Struct); + assert!(!self.is_enum()); &self.variants[0] } @@ -1721,8 +1681,9 @@ impl<'a, 'gcx, 'tcx, 'container> AdtDefData<'gcx, 'container> { pub fn variant_of_def(&self, def: Def) -> &VariantDefData<'gcx, 'container> { match def { - Def::Variant(_, vid) => self.variant_with_id(vid), - Def::Struct(..) | Def::TyAlias(..) | Def::AssociatedTy(..) => self.struct_variant(), + Def::Variant(vid) => self.variant_with_id(vid), + Def::Struct(..) | Def::Union(..) | + Def::TyAlias(..) | Def::AssociatedTy(..) => self.struct_variant(), _ => bug!("unexpected def {:?} in variant_of_def", def) } } @@ -1737,9 +1698,7 @@ impl<'a, 'gcx, 'tcx, 'container> AdtDefData<'gcx, 'container> { pub fn dtor_kind(&self) -> DtorKind { match self.destructor.get() { - Some(_) => { - TraitDtor(!self.flags.get().intersects(AdtFlags::IS_NO_DROP_FLAG)) - } + Some(_) => TraitDtor, None => NoDtor, } } @@ -1867,13 +1826,13 @@ impl<'a, 'tcx> AdtDefData<'tcx, 'tcx> { } TyTuple(ref tys) => { - // FIXME(#33242) we only need to constrain the last field - tys.iter().flat_map(|ty| { - self.sized_constraint_for_ty(tcx, stack, ty) - }).collect() + match tys.last() { + None => vec![], + Some(ty) => self.sized_constraint_for_ty(tcx, stack, ty) + } } - TyEnum(adt, substs) | TyStruct(adt, substs) => { + TyAdt(adt, substs) => { // recursive case let adt = tcx.lookup_adt_def_master(adt.did); adt.calculate_sized_constraint_inner(tcx, stack); @@ -1909,9 +1868,7 @@ impl<'a, 'tcx> AdtDefData<'tcx, 'tcx> { }; let sized_predicate = Binder(TraitRef { def_id: sized_trait, - substs: tcx.mk_substs(Substs::new_trait( - vec![], vec![], ty - )) + substs: Substs::new_trait(tcx, ty, &[]) }).to_predicate(); let predicates = tcx.lookup_predicates(self.did).predicates; if predicates.into_iter().any(|p| p == sized_predicate) { @@ -1984,7 +1941,7 @@ impl<'a, 'gcx, 'tcx, 'container> FieldDefData<'tcx, 'container> { /// Records the substitutions used to translate the polytype for an /// item into the monotype of an item reference. 
-#[derive(Clone)] +#[derive(Clone, RustcEncodable, RustcDecodable)] pub struct ItemSubsts<'tcx> { pub substs: &'tcx Substs<'tcx>, } @@ -2162,7 +2119,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn node_id_item_substs(self, id: NodeId) -> ItemSubsts<'gcx> { match self.tables.borrow().item_substs.get(&id) { None => ItemSubsts { - substs: self.global_tcx().mk_substs(Substs::empty()) + substs: Substs::empty(self.global_tcx()) }, Some(ts) => ts.clone(), } @@ -2241,7 +2198,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { match self.map.find(id) { Some(ast_map::NodeLocal(pat)) => { match pat.node { - PatKind::Binding(_, ref path1, _) => path1.node.as_str(), + hir::PatKind::Binding(_, ref path1, _) => path1.node.as_str(), _ => { bug!("Variable id {} maps to {:?}, not local", id, pat); }, @@ -2304,84 +2261,19 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } pub fn provided_trait_methods(self, id: DefId) -> Vec>> { - if let Some(id) = self.map.as_local_node_id(id) { - if let ItemTrait(_, _, _, ref ms) = self.map.expect_item(id).node { - ms.iter().filter_map(|ti| { - if let hir::MethodTraitItem(_, Some(_)) = ti.node { - match self.impl_or_trait_item(self.map.local_def_id(ti.id)) { - MethodTraitItem(m) => Some(m), - _ => { - bug!("provided_trait_methods(): \ - non-method item found from \ - looking up provided method?!") - } - } - } else { - None - } - }).collect() - } else { - bug!("provided_trait_methods: `{:?}` is not a trait", id) + self.impl_or_trait_items(id).iter().filter_map(|&def_id| { + match self.impl_or_trait_item(def_id) { + MethodTraitItem(ref m) if m.has_body => Some(m.clone()), + _ => None } - } else { - self.sess.cstore.provided_trait_methods(self.global_tcx(), id) - } + }).collect() } - pub fn associated_consts(self, id: DefId) -> Vec>> { + pub fn trait_impl_polarity(self, id: DefId) -> hir::ImplPolarity { if let Some(id) = self.map.as_local_node_id(id) { match self.map.expect_item(id).node { - ItemTrait(_, _, _, ref tis) => { - tis.iter().filter_map(|ti| { - if let hir::ConstTraitItem(_, _) = ti.node { - match self.impl_or_trait_item(self.map.local_def_id(ti.id)) { - ConstTraitItem(ac) => Some(ac), - _ => { - bug!("associated_consts(): \ - non-const item found from \ - looking up a constant?!") - } - } - } else { - None - } - }).collect() - } - ItemImpl(_, _, _, _, _, ref iis) => { - iis.iter().filter_map(|ii| { - if let hir::ImplItemKind::Const(_, _) = ii.node { - match self.impl_or_trait_item(self.map.local_def_id(ii.id)) { - ConstTraitItem(ac) => Some(ac), - _ => { - bug!("associated_consts(): \ - non-const item found from \ - looking up a constant?!") - } - } - } else { - None - } - }).collect() - } - _ => { - bug!("associated_consts: `{:?}` is not a trait or impl", id) - } - } - } else { - self.sess.cstore.associated_consts(self.global_tcx(), id) - } - } - - pub fn trait_impl_polarity(self, id: DefId) -> Option { - if let Some(id) = self.map.as_local_node_id(id) { - match self.map.find(id) { - Some(ast_map::NodeItem(item)) => { - match item.node { - hir::ItemImpl(_, polarity, _, _, _, _) => Some(polarity), - _ => None - } - } - _ => None + hir::ItemImpl(_, polarity, ..) 
=> polarity, + ref item => bug!("trait_impl_polarity: {:?} not an impl", item) } } else { self.sess.cstore.impl_polarity(id) @@ -2414,10 +2306,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { .expect("missing ImplOrTraitItem in metadata")) } - pub fn trait_item_def_ids(self, id: DefId) -> Rc> { + pub fn impl_or_trait_items(self, id: DefId) -> Rc> { lookup_locally_or_in_crate_store( - "trait_item_def_ids", id, &self.trait_item_def_ids, - || Rc::new(self.sess.cstore.trait_item_def_ids(id))) + "impl_or_trait_items", id, &self.impl_or_trait_item_def_ids, + || Rc::new(self.sess.cstore.impl_or_trait_items(id))) } /// Returns the trait-ref corresponding to a given impl, or None if it is @@ -2428,20 +2320,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { || self.sess.cstore.impl_trait_ref(self.global_tcx(), id)) } - /// Returns whether this DefId refers to an impl - pub fn is_impl(self, id: DefId) -> bool { - if let Some(id) = self.map.as_local_node_id(id) { - if let Some(ast_map::NodeItem( - &hir::Item { node: hir::ItemImpl(..), .. })) = self.map.find(id) { - true - } else { - false - } - } else { - self.sess.cstore.is_impl(id) - } - } - /// Returns a path resolution for node id if it exists, panics otherwise. pub fn expect_resolution(self, id: NodeId) -> PathResolution { *self.def_map.borrow().get(&id).expect("no def-map entry for node id") @@ -2462,10 +2340,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // or variant or their constructors, panics otherwise. pub fn expect_variant_def(self, def: Def) -> VariantDef<'tcx> { match def { - Def::Variant(enum_did, did) => { + Def::Variant(did) => { + let enum_did = self.parent_def_id(did).unwrap(); self.lookup_adt_def(enum_did).variant_with_id(did) } - Def::Struct(did) => { + Def::Struct(did) | Def::Union(did) => { self.lookup_adt_def(did).struct_variant() } _ => bug!("expect_variant_def used with unexpected def {:?}", def) @@ -2480,12 +2359,41 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - /// Returns the `DefPath` of an item. Note that if `id` is not - /// local to this crate -- or is inlined into this crate -- the - /// result will be a non-local `DefPath`. + /// Convert a `DefId` into its fully expanded `DefPath` (every + /// `DefId` is really just an interned def-path). + /// + /// Note that if `id` is not local to this crate -- or is + /// inlined into this crate -- the result will be a non-local + /// `DefPath`. + /// + /// This function is only safe to use when you are sure that the + /// full def-path is accessible. Examples that are known to be + /// safe are local def-ids or items; see `opt_def_path` for more + /// details. pub fn def_path(self, id: DefId) -> ast_map::DefPath { + self.opt_def_path(id).unwrap_or_else(|| { + bug!("could not load def-path for {:?}", id) + }) + } + + /// Convert a `DefId` into its fully expanded `DefPath` (every + /// `DefId` is really just an interned def-path). + /// + /// When going across crates, we do not save the full info for + /// every cross-crate def-id, and hence we may not always be able + /// to create a def-path. Therefore, this returns + /// `Option` to cover that possibility. It will always + /// return `Some` for local def-ids, however, as well as for + /// items. The problems arise with "minor" def-ids like those + /// associated with a pattern, `impl Trait`, or other internal + /// detail to a fn. + /// + /// Note that if `id` is not local to this crate -- or is + /// inlined into this crate -- the result will be a non-local + /// `DefPath`. 
+ pub fn opt_def_path(self, id: DefId) -> Option { if id.is_local() { - self.map.def_path(id) + Some(self.map.def_path(id)) } else { self.sess.cstore.relative_def_path(id) } @@ -2494,33 +2402,55 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn item_name(self, id: DefId) -> ast::Name { if let Some(id) = self.map.as_local_node_id(id) { self.map.name(id) + } else if id.index == CRATE_DEF_INDEX { + token::intern(&self.sess.cstore.original_crate_name(id.krate)) } else { - self.sess.cstore.item_name(id) + let def_key = self.sess.cstore.def_key(id); + // The name of a StructCtor is that of its struct parent. + if let ast_map::DefPathData::StructCtor = def_key.disambiguated_data.data { + self.item_name(DefId { + krate: id.krate, + index: def_key.parent.unwrap() + }) + } else { + def_key.disambiguated_data.data.get_opt_name().unwrap_or_else(|| { + bug!("item_name: no name for {:?}", self.def_path(id)); + }) + } } } // Register a given item type - pub fn register_item_type(self, did: DefId, ty: TypeScheme<'gcx>) { - self.tcache.borrow_mut().insert(did, ty); + pub fn register_item_type(self, did: DefId, scheme: TypeScheme<'gcx>) { + self.tcache.borrow_mut().insert(did, scheme.ty); + self.generics.borrow_mut().insert(did, scheme.generics); } // If the given item is in an external crate, looks up its type and adds it to // the type cache. Returns the type parameters and type. pub fn lookup_item_type(self, did: DefId) -> TypeScheme<'gcx> { - lookup_locally_or_in_crate_store( + let ty = lookup_locally_or_in_crate_store( "tcache", did, &self.tcache, - || self.sess.cstore.item_type(self.global_tcx(), did)) + || self.sess.cstore.item_type(self.global_tcx(), did)); + + TypeScheme { + ty: ty, + generics: self.lookup_generics(did) + } } pub fn opt_lookup_item_type(self, did: DefId) -> Option> { - if let Some(scheme) = self.tcache.borrow_mut().get(&did) { - return Some(scheme.clone()); + if did.krate != LOCAL_CRATE { + return Some(self.lookup_item_type(did)); } - if did.krate == LOCAL_CRATE { - None + if let Some(ty) = self.tcache.borrow().get(&did).cloned() { + Some(TypeScheme { + ty: ty, + generics: self.lookup_generics(did) + }) } else { - Some(self.sess.cstore.item_type(self.global_tcx(), did)) + None } } @@ -2549,6 +2479,13 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.lookup_adt_def_master(did) } + /// Given the did of an item, returns its generics. + pub fn lookup_generics(self, did: DefId) -> &'gcx Generics<'gcx> { + lookup_locally_or_in_crate_store( + "generics", did, &self.generics, + || self.alloc_generics(self.sess.cstore.item_generics(self.global_tcx(), did))) + } + /// Given the did of an item, returns its full set of predicates. pub fn lookup_predicates(self, did: DefId) -> GenericPredicates<'gcx> { lookup_locally_or_in_crate_store( @@ -2622,7 +2559,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { || self.lookup_repr_hints(did).contains(&attr::ReprSimd) } - pub fn item_variances(self, item_id: DefId) -> Rc { + pub fn item_variances(self, item_id: DefId) -> Rc> { lookup_locally_or_in_crate_store( "item_variance_map", item_id, &self.item_variance_map, || Rc::new(self.sess.cstore.item_variances(item_id))) @@ -2659,10 +2596,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { debug!("populate_implementations_for_primitive_if_necessary: searching for {:?}", primitive_def_id); - let impl_items = self.sess.cstore.impl_items(primitive_def_id); + let impl_items = self.sess.cstore.impl_or_trait_items(primitive_def_id); // Store the implementation info. 
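The `opt_def_path`/`def_path` pair above follows a common fallible-lookup-plus-panicking-wrapper split: cross-crate ids may have no recorded path, so only the wrapper treats a miss as a compiler bug. A minimal sketch of the same pattern, with a plain `HashMap` standing in for the real local map and crate store (names are hypothetical):

use std::collections::HashMap;

struct DefPaths {
    paths: HashMap<u32, String>, // def index -> rendered def-path
}

impl DefPaths {
    // Fallible lookup: some ids simply have no recorded path.
    fn opt_def_path(&self, id: u32) -> Option<&str> {
        self.paths.get(&id).map(|s| s.as_str())
    }

    // Infallible wrapper: a miss here violates a compiler invariant.
    fn def_path(&self, id: u32) -> &str {
        self.opt_def_path(id)
            .unwrap_or_else(|| panic!("could not load def-path for {:?}", id))
    }
}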
- self.impl_items.borrow_mut().insert(primitive_def_id, impl_items); + self.impl_or_trait_item_def_ids.borrow_mut().insert(primitive_def_id, Rc::new(impl_items)); self.populated_external_primitive_impls.borrow_mut().insert(primitive_def_id); } @@ -2688,11 +2625,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { let inherent_impls = self.sess.cstore.inherent_implementations_for_type(type_id); for &impl_def_id in &inherent_impls { // Store the implementation info. - let impl_items = self.sess.cstore.impl_items(impl_def_id); - self.impl_items.borrow_mut().insert(impl_def_id, impl_items); + let impl_items = self.sess.cstore.impl_or_trait_items(impl_def_id); + self.impl_or_trait_item_def_ids.borrow_mut().insert(impl_def_id, Rc::new(impl_items)); } - self.inherent_impls.borrow_mut().insert(type_id, Rc::new(inherent_impls)); + self.inherent_impls.borrow_mut().insert(type_id, inherent_impls); self.populated_external_types.borrow_mut().insert(type_id); } @@ -2718,28 +2655,24 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.record_trait_has_default_impl(trait_id); } - for impl_def_id in self.sess.cstore.implementations_of_trait(trait_id) { - let impl_items = self.sess.cstore.impl_items(impl_def_id); + for impl_def_id in self.sess.cstore.implementations_of_trait(Some(trait_id)) { + let impl_items = self.sess.cstore.impl_or_trait_items(impl_def_id); let trait_ref = self.impl_trait_ref(impl_def_id).unwrap(); // Record the trait->implementation mapping. - if let Some(parent) = self.sess.cstore.impl_parent(impl_def_id) { - def.record_remote_impl(self, impl_def_id, trait_ref, parent); - } else { - def.record_remote_impl(self, impl_def_id, trait_ref, trait_id); - } + let parent = self.sess.cstore.impl_parent(impl_def_id).unwrap_or(trait_id); + def.record_remote_impl(self, impl_def_id, trait_ref, parent); // For any methods that use a default implementation, add them to // the map. This is a bit unfortunate. - for impl_item_def_id in &impl_items { - let method_def_id = impl_item_def_id.def_id(); + for &impl_item_def_id in &impl_items { // load impl items eagerly for convenience // FIXME: we may want to load these lazily - self.impl_or_trait_item(method_def_id); + self.impl_or_trait_item(impl_item_def_id); } // Store the implementation info. - self.impl_items.borrow_mut().insert(impl_def_id, impl_items); + self.impl_or_trait_item_def_ids.borrow_mut().insert(impl_def_id, Rc::new(impl_items)); } def.flags.set(def.flags.get() | TraitFlags::IMPLS_VALID); @@ -2804,18 +2737,18 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - /// If the given def ID describes an item belonging to a trait (either a - /// default method or an implementation of a trait method), return the ID of - /// the trait that the method belongs to. Otherwise, return `None`. + /// If the given def ID describes an item belonging to a trait, + /// return the ID of the trait that the trait item belongs to. + /// Otherwise, return `None`. 
pub fn trait_of_item(self, def_id: DefId) -> Option { if def_id.krate != LOCAL_CRATE { - return self.sess.cstore.trait_of_item(self.global_tcx(), def_id); + return self.sess.cstore.trait_of_item(def_id); } - match self.impl_or_trait_items.borrow().get(&def_id).cloned() { + match self.impl_or_trait_items.borrow().get(&def_id) { Some(impl_or_trait_item) => { match impl_or_trait_item.container() { TraitContainer(def_id) => Some(def_id), - ImplContainer(def_id) => self.trait_id_of_impl(def_id), + ImplContainer(_) => None } } None => None @@ -2828,19 +2761,21 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// is already that of the original trait method, then the return value is /// the same). /// Otherwise, return `None`. - pub fn trait_item_of_item(self, def_id: DefId) -> Option { - let impl_item = match self.impl_or_trait_items.borrow().get(&def_id) { + pub fn trait_item_of_item(self, def_id: DefId) -> Option { + let impl_or_trait_item = match self.impl_or_trait_items.borrow().get(&def_id) { Some(m) => m.clone(), None => return None, }; - let name = impl_item.name(); - match self.trait_of_item(def_id) { - Some(trait_did) => { - self.trait_items(trait_did).iter() - .find(|item| item.name() == name) - .map(|item| item.id()) + match impl_or_trait_item.container() { + TraitContainer(_) => Some(impl_or_trait_item.def_id()), + ImplContainer(def_id) => { + self.trait_id_of_impl(def_id).and_then(|trait_did| { + let name = impl_or_trait_item.name(); + self.trait_items(trait_did).iter() + .find(|item| item.name() == name) + .map(|item| item.def_id()) + }) } - None => None } } @@ -2852,10 +2787,12 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // regions, so it shouldn't matter what we use for the free id let free_id_outlive = self.region_maps.node_extent(ast::DUMMY_NODE_ID); ty::ParameterEnvironment { - free_substs: self.mk_substs(Substs::empty()), + free_substs: Substs::empty(self), caller_bounds: Vec::new(), - implicit_region_bound: ty::ReEmpty, - free_id_outlive: free_id_outlive + implicit_region_bound: self.mk_region(ty::ReEmpty), + free_id_outlive: free_id_outlive, + is_copy_cache: RefCell::new(FnvHashMap()), + is_sized_cache: RefCell::new(FnvHashMap()), } } @@ -2864,39 +2801,31 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// In general, this means converting from bound parameters to /// free parameters. Since we currently represent bound/free type /// parameters in the same way, this only has an effect on regions. 
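The rewritten `trait_of_item` above answers the question directly from the item's container instead of chasing back through the impl. A self-contained sketch of that container-based lookup, using simplified stand-in types rather than rustc's real `ImplOrTraitItem`:

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Debug)]
enum Container {
    TraitContainer(u32), // id of the owning trait
    ImplContainer(u32),  // id of the owning impl
}

struct AssocItem {
    container: Container,
}

// Returns the owning trait for trait items, and None for impl items.
fn trait_of_item(items: &HashMap<u32, AssocItem>, def_id: u32) -> Option<u32> {
    items.get(&def_id).and_then(|item| match item.container {
        Container::TraitContainer(trait_id) => Some(trait_id),
        Container::ImplContainer(_) => None,
    })
}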
- pub fn construct_free_substs(self, generics: &Generics<'gcx>, - free_id_outlive: CodeExtent) -> Substs<'gcx> { - // map T => T - let mut types = VecPerParamSpace::empty(); - for def in generics.types.as_slice() { - debug!("construct_parameter_environment(): push_types_from_defs: def={:?}", - def); - types.push(def.space, self.global_tcx().mk_param_from_def(def)); - } + pub fn construct_free_substs(self, def_id: DefId, + free_id_outlive: CodeExtent) + -> &'gcx Substs<'gcx> { - // map bound 'a => free 'a - let mut regions = VecPerParamSpace::empty(); - for def in generics.regions.as_slice() { - let region = - ReFree(FreeRegion { scope: free_id_outlive, - bound_region: def.to_bound_region() }); - debug!("push_region_params {:?}", region); - regions.push(def.space, region); - } + let substs = Substs::for_item(self.global_tcx(), def_id, |def, _| { + // map bound 'a => free 'a + self.global_tcx().mk_region(ReFree(FreeRegion { + scope: free_id_outlive, + bound_region: def.to_bound_region() + })) + }, |def, _| { + // map T => T + self.global_tcx().mk_param_from_def(def) + }); - Substs { - types: types, - regions: regions, - } + debug!("construct_parameter_environment: {:?}", substs); + substs } /// See `ParameterEnvironment` struct def'n for details. /// If you were using `free_id: NodeId`, you might try `self.region_maps.item_extent(free_id)` - /// for the `free_id_outlive` parameter. (But note that that is not always quite right.) + /// for the `free_id_outlive` parameter. (But note that this is not always quite right.) pub fn construct_parameter_environment(self, span: Span, - generics: &ty::Generics<'gcx>, - generic_predicates: &ty::GenericPredicates<'gcx>, + def_id: DefId, free_id_outlive: CodeExtent) -> ParameterEnvironment<'gcx> { @@ -2904,16 +2833,17 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // Construct the free substs. // - let free_substs = self.construct_free_substs(generics, free_id_outlive); + let free_substs = self.construct_free_substs(def_id, free_id_outlive); // // Compute the bounds on Self and the type parameters. // let tcx = self.global_tcx(); - let bounds = generic_predicates.instantiate(tcx, &free_substs); + let generic_predicates = tcx.lookup_predicates(def_id); + let bounds = generic_predicates.instantiate(tcx, free_substs); let bounds = tcx.liberate_late_bound_regions(free_id_outlive, &ty::Binder(bounds)); - let predicates = bounds.predicates.into_vec(); + let predicates = bounds.predicates; // Finally, we have to normalize the bounds in the environment, in // case they contain any associated type projections. 
This process @@ -2929,16 +2859,22 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // let unnormalized_env = ty::ParameterEnvironment { - free_substs: tcx.mk_substs(free_substs), - implicit_region_bound: ty::ReScope(free_id_outlive), + free_substs: free_substs, + implicit_region_bound: tcx.mk_region(ty::ReScope(free_id_outlive)), caller_bounds: predicates, free_id_outlive: free_id_outlive, + is_copy_cache: RefCell::new(FnvHashMap()), + is_sized_cache: RefCell::new(FnvHashMap()), }; let cause = traits::ObligationCause::misc(span, free_id_outlive.node_id(&self.region_maps)); traits::normalize_param_env_or_error(tcx, unnormalized_env, cause) } + pub fn node_scope_region(self, id: NodeId) -> &'tcx Region { + self.mk_region(ty::ReScope(self.region_maps.node_extent(id))) + } + pub fn is_method_call(self, expr_id: NodeId) -> bool { self.tables.borrow().method_map.contains_key(&MethodCall::expr(expr_id)) } @@ -2948,7 +2884,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { autoderefs)) } - pub fn upvar_capture(self, upvar_id: ty::UpvarId) -> Option { + pub fn upvar_capture(self, upvar_id: ty::UpvarId) -> Option> { Some(self.tables.borrow().upvar_capture_map.get(&upvar_id).unwrap().clone()) } @@ -2973,11 +2909,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } /// The category of explicit self. -#[derive(Clone, Copy, Eq, PartialEq, Debug)] -pub enum ExplicitSelfCategory { +#[derive(Clone, Copy, Eq, PartialEq, Debug, RustcEncodable, RustcDecodable)] +pub enum ExplicitSelfCategory<'tcx> { Static, ByValue, - ByReference(Region, hir::Mutability), + ByReference(&'tcx Region, hir::Mutability), ByBox, } diff --git a/src/librustc/ty/outlives.rs b/src/librustc/ty/outlives.rs index ee431681ad..a4edd3b93c 100644 --- a/src/librustc/ty/outlives.rs +++ b/src/librustc/ty/outlives.rs @@ -17,7 +17,7 @@ use ty::{self, Ty, TypeFoldable}; #[derive(Debug)] pub enum Component<'tcx> { - Region(ty::Region), + Region(&'tcx ty::Region), Param(ty::ParamTy), UnresolvedInferenceVariable(ty::InferTy), @@ -172,8 +172,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { ty::TyUint(..) | // OutlivesScalar ty::TyFloat(..) | // OutlivesScalar ty::TyNever | // ... - ty::TyEnum(..) | // OutlivesNominalType - ty::TyStruct(..) | // OutlivesNominalType + ty::TyAdt(..) | // OutlivesNominalType ty::TyBox(..) | // OutlivesNominalType (ish) ty::TyAnon(..) | // OutlivesNominalType (ish) ty::TyStr | // OutlivesScalar (ish) @@ -210,7 +209,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } } -fn push_region_constraints<'tcx>(out: &mut Vec>, regions: Vec) { +fn push_region_constraints<'tcx>(out: &mut Vec>, regions: Vec<&'tcx ty::Region>) { for r in regions { if !r.is_bound() { out.push(Component::Region(r)); diff --git a/src/librustc/ty/relate.rs b/src/librustc/ty/relate.rs index 05a9b81115..b10c731fe2 100644 --- a/src/librustc/ty/relate.rs +++ b/src/librustc/ty/relate.rs @@ -14,7 +14,7 @@ //! type equality, etc. 
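A recurring theme in these hunks (and in the patch overall) is replacing by-value `ty::Region` with interned `&'tcx Region`, so regions can be shared and compared cheaply. A rough standalone sketch of interning, using `Box::leak` in place of the compiler's arena and `'tcx` lifetime, so this is only an approximation of the idea rather than the real `TyCtxt` machinery:

use std::collections::HashMap;
use std::hash::Hash;

struct Interner<T: Eq + Hash + Clone + 'static> {
    map: HashMap<T, &'static T>,
}

impl<T: Eq + Hash + Clone + 'static> Interner<T> {
    fn new() -> Self {
        Interner { map: HashMap::new() }
    }

    fn intern(&mut self, value: T) -> &'static T {
        if let Some(&interned) = self.map.get(&value) {
            return interned;
        }
        // Leaking stands in for arena allocation tied to the ctxt lifetime.
        let interned: &'static T = Box::leak(Box::new(value.clone()));
        self.map.insert(value, interned);
        interned
    }
}

fn main() {
    #[derive(Clone, PartialEq, Eq, Hash, Debug)]
    enum Region { ReStatic, ReEmpty }

    let mut interner = Interner::new();
    let a = interner.intern(Region::ReStatic);
    let b = interner.intern(Region::ReStatic);
    // Interning makes equality a pointer comparison.
    assert!(std::ptr::eq(a, b));
}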
use hir::def_id::DefId; -use ty::subst::{ParamSpace, Substs}; +use ty::subst::{Kind, Substs}; use ty::{self, Ty, TyCtxt, TypeFoldable}; use ty::error::{ExpectedFound, TypeError}; use std::rc::Rc; @@ -71,8 +71,8 @@ pub trait TypeRelation<'a, 'gcx: 'a+'tcx, 'tcx: 'a> : Sized { fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>>; - fn regions(&mut self, a: ty::Region, b: ty::Region) - -> RelateResult<'tcx, ty::Region>; + fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region>; fn binders(&mut self, a: &ty::Binder, b: &ty::Binder) -> RelateResult<'tcx, ty::Binder> @@ -139,88 +139,26 @@ fn relate_item_substs<'a, 'gcx, 'tcx, R>(relation: &mut R, } pub fn relate_substs<'a, 'gcx, 'tcx, R>(relation: &mut R, - variances: Option<&ty::ItemVariances>, + variances: Option<&Vec>, a_subst: &'tcx Substs<'tcx>, b_subst: &'tcx Substs<'tcx>) -> RelateResult<'tcx, &'tcx Substs<'tcx>> where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a { - let mut substs = Substs::empty(); - - for &space in &ParamSpace::all() { - let a_tps = a_subst.types.get_slice(space); - let b_tps = b_subst.types.get_slice(space); - let t_variances = variances.map(|v| v.types.get_slice(space)); - let tps = relate_type_params(relation, t_variances, a_tps, b_tps)?; - substs.types.replace(space, tps); - } - - for &space in &ParamSpace::all() { - let a_regions = a_subst.regions.get_slice(space); - let b_regions = b_subst.regions.get_slice(space); - let r_variances = variances.map(|v| v.regions.get_slice(space)); - let regions = relate_region_params(relation, - r_variances, - a_regions, - b_regions)?; - substs.regions.replace(space, regions); - } - - Ok(relation.tcx().mk_substs(substs)) -} - -fn relate_type_params<'a, 'gcx, 'tcx, R>(relation: &mut R, - variances: Option<&[ty::Variance]>, - a_tys: &[Ty<'tcx>], - b_tys: &[Ty<'tcx>]) - -> RelateResult<'tcx, Vec>> - where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a -{ - if a_tys.len() != b_tys.len() { - return Err(TypeError::TyParamSize(expected_found(relation, - &a_tys.len(), - &b_tys.len()))); - } + let tcx = relation.tcx(); - (0 .. 
a_tys.len()) - .map(|i| { - let a_ty = a_tys[i]; - let b_ty = b_tys[i]; - let v = variances.map_or(ty::Invariant, |v| v[i]); - relation.relate_with_variance(v, &a_ty, &b_ty) - }) - .collect() -} + let params = a_subst.params().iter().zip(b_subst.params()).enumerate().map(|(i, (a, b))| { + let variance = variances.map_or(ty::Invariant, |v| v[i]); + if let (Some(a_ty), Some(b_ty)) = (a.as_type(), b.as_type()) { + Ok(Kind::from(relation.relate_with_variance(variance, &a_ty, &b_ty)?)) + } else if let (Some(a_r), Some(b_r)) = (a.as_region(), b.as_region()) { + Ok(Kind::from(relation.relate_with_variance(variance, &a_r, &b_r)?)) + } else { + bug!() + } + }); -fn relate_region_params<'a, 'gcx, 'tcx, R>(relation: &mut R, - variances: Option<&[ty::Variance]>, - a_rs: &[ty::Region], - b_rs: &[ty::Region]) - -> RelateResult<'tcx, Vec> - where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a -{ - let num_region_params = a_rs.len(); - - debug!("relate_region_params(a_rs={:?}, \ - b_rs={:?}, variances={:?})", - a_rs, - b_rs, - variances); - - assert_eq!(num_region_params, - variances.map_or(num_region_params, - |v| v.len())); - - assert_eq!(num_region_params, b_rs.len()); - - (0..a_rs.len()) - .map(|i| { - let a_r = a_rs[i]; - let b_r = b_rs[i]; - let variance = variances.map_or(ty::Invariant, |v| v[i]); - relation.relate_with_variance(variance, &a_r, &b_r) - }) - .collect() + Substs::maybe_new(tcx, params) } impl<'tcx> Relate<'tcx> for &'tcx ty::BareFnTy<'tcx> { @@ -326,24 +264,33 @@ impl<'tcx> Relate<'tcx> for ty::ProjectionTy<'tcx> { } } -impl<'tcx> Relate<'tcx> for ty::ProjectionPredicate<'tcx> { +impl<'tcx> Relate<'tcx> for ty::ExistentialProjection<'tcx> { fn relate<'a, 'gcx, R>(relation: &mut R, - a: &ty::ProjectionPredicate<'tcx>, - b: &ty::ProjectionPredicate<'tcx>) - -> RelateResult<'tcx, ty::ProjectionPredicate<'tcx>> + a: &ty::ExistentialProjection<'tcx>, + b: &ty::ExistentialProjection<'tcx>) + -> RelateResult<'tcx, ty::ExistentialProjection<'tcx>> where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a { - let projection_ty = relation.relate(&a.projection_ty, &b.projection_ty)?; - let ty = relation.relate(&a.ty, &b.ty)?; - Ok(ty::ProjectionPredicate { projection_ty: projection_ty, ty: ty }) + if a.item_name != b.item_name { + Err(TypeError::ProjectionNameMismatched( + expected_found(relation, &a.item_name, &b.item_name))) + } else { + let trait_ref = relation.relate(&a.trait_ref, &b.trait_ref)?; + let ty = relation.relate(&a.ty, &b.ty)?; + Ok(ty::ExistentialProjection { + trait_ref: trait_ref, + item_name: a.item_name, + ty: ty + }) + } } } -impl<'tcx> Relate<'tcx> for Vec> { +impl<'tcx> Relate<'tcx> for Vec> { fn relate<'a, 'gcx, R>(relation: &mut R, - a: &Vec>, - b: &Vec>) - -> RelateResult<'tcx, Vec>> + a: &Vec>, + b: &Vec>) + -> RelateResult<'tcx, Vec>> where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a { // To be compatible, `a` and `b` must be for precisely the @@ -361,27 +308,6 @@ impl<'tcx> Relate<'tcx> for Vec> { } } -impl<'tcx> Relate<'tcx> for ty::ExistentialBounds<'tcx> { - fn relate<'a, 'gcx, R>(relation: &mut R, - a: &ty::ExistentialBounds<'tcx>, - b: &ty::ExistentialBounds<'tcx>) - -> RelateResult<'tcx, ty::ExistentialBounds<'tcx>> - where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a - { - let r = - relation.with_cause( - Cause::ExistentialRegionBound, - |relation| relation.relate_with_variance(ty::Contravariant, - &a.region_bound, - &b.region_bound))?; - let nb = relation.relate(&a.builtin_bounds, &b.builtin_bounds)?; - let pb = 
relation.relate(&a.projection_bounds, &b.projection_bounds)?; - Ok(ty::ExistentialBounds { region_bound: r, - builtin_bounds: nb, - projection_bounds: pb }) - } -} - impl<'tcx> Relate<'tcx> for ty::BuiltinBounds { fn relate<'a, 'gcx, R>(relation: &mut R, a: &ty::BuiltinBounds, @@ -416,6 +342,23 @@ impl<'tcx> Relate<'tcx> for ty::TraitRef<'tcx> { } } +impl<'tcx> Relate<'tcx> for ty::ExistentialTraitRef<'tcx> { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &ty::ExistentialTraitRef<'tcx>, + b: &ty::ExistentialTraitRef<'tcx>) + -> RelateResult<'tcx, ty::ExistentialTraitRef<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + // Different traits cannot be related + if a.def_id != b.def_id { + Err(TypeError::Traits(expected_found(relation, &a.def_id, &b.def_id))) + } else { + let substs = relate_item_substs(relation, a.def_id, a.substs, b.substs)?; + Ok(ty::ExistentialTraitRef { def_id: a.def_id, substs: substs }) + } + } +} + impl<'tcx> Relate<'tcx> for Ty<'tcx> { fn relate<'a, 'gcx, R>(relation: &mut R, a: &Ty<'tcx>, @@ -466,30 +409,35 @@ pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, } (&ty::TyParam(ref a_p), &ty::TyParam(ref b_p)) - if a_p.idx == b_p.idx && a_p.space == b_p.space => + if a_p.idx == b_p.idx => { Ok(a) } - (&ty::TyEnum(a_def, a_substs), &ty::TyEnum(b_def, b_substs)) + (&ty::TyAdt(a_def, a_substs), &ty::TyAdt(b_def, b_substs)) if a_def == b_def => { let substs = relate_item_substs(relation, a_def.did, a_substs, b_substs)?; - Ok(tcx.mk_enum(a_def, substs)) + Ok(tcx.mk_adt(a_def, substs)) } - (&ty::TyTrait(ref a_), &ty::TyTrait(ref b_)) => + (&ty::TyTrait(ref a_obj), &ty::TyTrait(ref b_obj)) => { - let principal = relation.relate(&a_.principal, &b_.principal)?; - let bounds = relation.relate(&a_.bounds, &b_.bounds)?; - Ok(tcx.mk_trait(principal, bounds)) - } - - (&ty::TyStruct(a_def, a_substs), &ty::TyStruct(b_def, b_substs)) - if a_def == b_def => - { - let substs = relate_item_substs(relation, a_def.did, a_substs, b_substs)?; - Ok(tcx.mk_struct(a_def, substs)) + let principal = relation.relate(&a_obj.principal, &b_obj.principal)?; + let r = + relation.with_cause( + Cause::ExistentialRegionBound, + |relation| relation.relate_with_variance(ty::Contravariant, + &a_obj.region_bound, + &b_obj.region_bound))?; + let nb = relation.relate(&a_obj.builtin_bounds, &b_obj.builtin_bounds)?; + let pb = relation.relate(&a_obj.projection_bounds, &b_obj.projection_bounds)?; + Ok(tcx.mk_trait(ty::TraitObject { + principal: principal, + region_bound: r, + builtin_bounds: nb, + projection_bounds: pb + })) } (&ty::TyClosure(a_id, a_substs), @@ -517,9 +465,9 @@ pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, (&ty::TyRef(a_r, ref a_mt), &ty::TyRef(b_r, ref b_mt)) => { - let r = relation.relate_with_variance(ty::Contravariant, a_r, b_r)?; + let r = relation.relate_with_variance(ty::Contravariant, &a_r, &b_r)?; let mt = relation.relate(a_mt, b_mt)?; - Ok(tcx.mk_ref(tcx.mk_region(r), mt)) + Ok(tcx.mk_ref(r, mt)) } (&ty::TyArray(a_t, sz_a), &ty::TyArray(b_t, sz_b)) => @@ -615,11 +563,11 @@ impl<'tcx> Relate<'tcx> for &'tcx Substs<'tcx> { } } -impl<'tcx> Relate<'tcx> for ty::Region { +impl<'tcx> Relate<'tcx> for &'tcx ty::Region { fn relate<'a, 'gcx, R>(relation: &mut R, - a: &ty::Region, - b: &ty::Region) - -> RelateResult<'tcx, ty::Region> + a: &&'tcx ty::Region, + b: &&'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region> where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a { relation.regions(*a, *b) diff --git 
a/src/librustc/ty/structural_impls.rs b/src/librustc/ty/structural_impls.rs index 83413d16ff..1715185373 100644 --- a/src/librustc/ty/structural_impls.rs +++ b/src/librustc/ty/structural_impls.rs @@ -9,13 +9,11 @@ // except according to those terms. use infer::type_variable; -use ty::subst::{self, VecPerParamSpace}; -use ty::{self, Lift, TraitRef, Ty, TyCtxt}; +use ty::{self, Lift, Ty, TyCtxt}; use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use std::rc::Rc; use syntax::abi; -use syntax::ptr::P; use hir; @@ -73,17 +71,20 @@ impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Vec { } } -impl<'tcx> Lift<'tcx> for ty::Region { - type Lifted = Self; - fn lift_to_tcx(&self, _: TyCtxt) -> Option { - Some(*self) +impl<'a, 'tcx> Lift<'tcx> for ty::TraitRef<'a> { + type Lifted = ty::TraitRef<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.substs).map(|substs| ty::TraitRef { + def_id: self.def_id, + substs: substs + }) } } -impl<'a, 'tcx> Lift<'tcx> for TraitRef<'a> { - type Lifted = TraitRef<'tcx>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option> { - tcx.lift(&self.substs).map(|substs| TraitRef { +impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialTraitRef<'a> { + type Lifted = ty::ExistentialTraitRef<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.substs).map(|substs| ty::ExistentialTraitRef { def_id: self.def_id, substs: substs }) @@ -141,6 +142,19 @@ impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionPredicate<'a> { } } +impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialProjection<'a> { + type Lifted = ty::ExistentialProjection<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&(self.trait_ref, self.ty)).map(|(trait_ref, ty)| { + ty::ExistentialProjection { + trait_ref: trait_ref, + item_name: self.item_name, + ty: ty + } + }) + } +} + impl<'a, 'tcx> Lift<'tcx> for ty::Predicate<'a> { type Lifted = ty::Predicate<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { @@ -163,9 +177,6 @@ impl<'a, 'tcx> Lift<'tcx> for ty::Predicate<'a> { ty::Predicate::WellFormed(ty) => { tcx.lift(&ty).map(ty::Predicate::WellFormed) } - ty::Predicate::Rfc1592(box ref a) => { - tcx.lift(a).map(|a| ty::Predicate::Rfc1592(Box::new(a))) - } ty::Predicate::ClosureKind(closure_def_id, kind) => { Some(ty::Predicate::ClosureKind(closure_def_id, kind)) } @@ -293,13 +304,21 @@ impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> { FixedArraySize(x) => FixedArraySize(x), TyParamSize(x) => TyParamSize(x), ArgCount => ArgCount, - RegionsDoesNotOutlive(a, b) => RegionsDoesNotOutlive(a, b), - RegionsNotSame(a, b) => RegionsNotSame(a, b), - RegionsNoOverlap(a, b) => RegionsNoOverlap(a, b), + RegionsDoesNotOutlive(a, b) => { + return tcx.lift(&(a, b)).map(|(a, b)| RegionsDoesNotOutlive(a, b)) + } + RegionsNotSame(a, b) => { + return tcx.lift(&(a, b)).map(|(a, b)| RegionsNotSame(a, b)) + } + RegionsNoOverlap(a, b) => { + return tcx.lift(&(a, b)).map(|(a, b)| RegionsNoOverlap(a, b)) + } RegionsInsufficientlyPolymorphic(a, b) => { - RegionsInsufficientlyPolymorphic(a, b) + return tcx.lift(&b).map(|b| RegionsInsufficientlyPolymorphic(a, b)) + } + RegionsOverlyPolymorphic(a, b) => { + return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b)) } - RegionsOverlyPolymorphic(a, b) => RegionsOverlyPolymorphic(a, b), IntegerAsChar => IntegerAsChar, IntMismatch(x) => IntMismatch(x), FloatMismatch(x) => FloatMismatch(x), @@ -417,40 +436,24 @@ impl<'tcx, 
T:TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder { } } -impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for P<[T]> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - self.iter().map(|t| t.fold_with(folder)).collect() - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.iter().any(|t| t.visit_with(visitor)) - } -} - -impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for VecPerParamSpace { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - self.map(|elem| elem.fold_with(folder)) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.iter().any(|elem| elem.visit_with(visitor)) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::TraitTy<'tcx> { +impl<'tcx> TypeFoldable<'tcx> for ty::TraitObject<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::TraitTy { + ty::TraitObject { principal: self.principal.fold_with(folder), - bounds: self.bounds.fold_with(folder), + region_bound: self.region_bound.fold_with(folder), + builtin_bounds: self.builtin_bounds, + projection_bounds: self.projection_bounds.fold_with(folder), } } fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.principal.visit_with(visitor) || self.bounds.visit_with(visitor) + self.principal.visit_with(visitor) || + self.region_bound.visit_with(visitor) || + self.projection_bounds.visit_with(visitor) } } -impl<'tcx> TypeFoldable<'tcx> for &'tcx [Ty<'tcx>] { +impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { let tys = self.iter().map(|t| t.fold_with(folder)).collect(); folder.tcx().mk_type_list(tys) @@ -468,7 +471,7 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { ty::TyRawPtr(tm) => ty::TyRawPtr(tm.fold_with(folder)), ty::TyArray(typ, sz) => ty::TyArray(typ.fold_with(folder), sz), ty::TySlice(typ) => ty::TySlice(typ.fold_with(folder)), - ty::TyEnum(tid, substs) => ty::TyEnum(tid, substs.fold_with(folder)), + ty::TyAdt(tid, substs) => ty::TyAdt(tid, substs.fold_with(folder)), ty::TyTrait(ref trait_ty) => ty::TyTrait(trait_ty.fold_with(folder)), ty::TyTuple(ts) => ty::TyTuple(ts.fold_with(folder)), ty::TyFnDef(def_id, substs, f) => { @@ -480,7 +483,6 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { ty::TyRef(ref r, tm) => { ty::TyRef(r.fold_with(folder), tm.fold_with(folder)) } - ty::TyStruct(did, substs) => ty::TyStruct(did, substs.fold_with(folder)), ty::TyClosure(did, substs) => ty::TyClosure(did, substs.fold_with(folder)), ty::TyProjection(ref data) => ty::TyProjection(data.fold_with(folder)), ty::TyAnon(did, substs) => ty::TyAnon(did, substs.fold_with(folder)), @@ -501,7 +503,7 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { ty::TyRawPtr(ref tm) => tm.visit_with(visitor), ty::TyArray(typ, _sz) => typ.visit_with(visitor), ty::TySlice(typ) => typ.visit_with(visitor), - ty::TyEnum(_tid, ref substs) => substs.visit_with(visitor), + ty::TyAdt(_, substs) => substs.visit_with(visitor), ty::TyTrait(ref trait_ty) => trait_ty.visit_with(visitor), ty::TyTuple(ts) => ts.visit_with(visitor), ty::TyFnDef(_, substs, ref f) => { @@ -509,7 +511,6 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { } ty::TyFnPtr(ref f) => f.visit_with(visitor), ty::TyRef(r, ref tm) => r.visit_with(visitor) || tm.visit_with(visitor), - ty::TyStruct(_did, ref substs) => substs.visit_with(visitor), ty::TyClosure(_did, ref substs) => substs.visit_with(visitor), ty::TyProjection(ref data) => 
data.visit_with(visitor), ty::TyAnon(_, ref substs) => substs.visit_with(visitor), @@ -599,8 +600,17 @@ impl<'tcx> TypeFoldable<'tcx> for ty::TraitRef<'tcx> { } } - fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - folder.fold_trait_ref(self) + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.substs.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::ExistentialTraitRef<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::ExistentialTraitRef { + def_id: self.def_id, + substs: self.substs.fold_with(folder), + } } fn super_visit_with>(&self, visitor: &mut V) -> bool { @@ -629,7 +639,7 @@ impl<'tcx> TypeFoldable<'tcx> for ty::ImplHeader<'tcx> { } } -impl<'tcx> TypeFoldable<'tcx> for ty::Region { +impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Region { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _folder: &mut F) -> Self { *self } @@ -647,43 +657,6 @@ impl<'tcx> TypeFoldable<'tcx> for ty::Region { } } -impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Region { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _folder: &mut F) -> Self { - *self - } - - fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - let region = folder.fold_region(**self); - folder.tcx().mk_region(region) - } - - fn super_visit_with>(&self, _visitor: &mut V) -> bool { - false - } - - fn visit_with>(&self, visitor: &mut V) -> bool { - visitor.visit_region(**self) - } -} - -impl<'tcx> TypeFoldable<'tcx> for &'tcx subst::Substs<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - let substs = subst::Substs { - regions: self.regions.fold_with(folder), - types: self.types.fold_with(folder) - }; - folder.tcx().mk_substs(substs) - } - - fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - folder.fold_substs(self) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.types.visit_with(visitor) || self.regions.visit_with(visitor) - } -} - impl<'tcx> TypeFoldable<'tcx> for ty::ClosureSubsts<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { ty::ClosureSubsts { @@ -741,30 +714,11 @@ impl<'tcx> TypeFoldable<'tcx> for ty::BuiltinBounds { } } -impl<'tcx> TypeFoldable<'tcx> for ty::ExistentialBounds<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::ExistentialBounds { - region_bound: self.region_bound.fold_with(folder), - builtin_bounds: self.builtin_bounds, - projection_bounds: self.projection_bounds.fold_with(folder), - } - } - - fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - folder.fold_existential_bounds(self) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.region_bound.visit_with(visitor) || self.projection_bounds.visit_with(visitor) - } -} - impl<'tcx> TypeFoldable<'tcx> for ty::TypeParameterDef<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { ty::TypeParameterDef { name: self.name, def_id: self.def_id, - space: self.space, index: self.index, default: self.default.fold_with(folder), default_def_id: self.default_def_id, @@ -778,7 +732,7 @@ impl<'tcx> TypeFoldable<'tcx> for ty::TypeParameterDef<'tcx> { } } -impl<'tcx> TypeFoldable<'tcx> for ty::ObjectLifetimeDefault { +impl<'tcx> TypeFoldable<'tcx> for ty::ObjectLifetimeDefault<'tcx> { fn 
super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { match *self { ty::ObjectLifetimeDefault::Ambiguous => @@ -800,12 +754,11 @@ impl<'tcx> TypeFoldable<'tcx> for ty::ObjectLifetimeDefault { } } -impl<'tcx> TypeFoldable<'tcx> for ty::RegionParameterDef { +impl<'tcx> TypeFoldable<'tcx> for ty::RegionParameterDef<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { ty::RegionParameterDef { name: self.name, def_id: self.def_id, - space: self.space, index: self.index, bounds: self.bounds.fold_with(folder), } @@ -816,38 +769,11 @@ impl<'tcx> TypeFoldable<'tcx> for ty::RegionParameterDef { } } -impl<'tcx> TypeFoldable<'tcx> for ty::Generics<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::Generics { - types: self.types.fold_with(folder), - regions: self.regions.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.types.visit_with(visitor) || self.regions.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::GenericPredicates<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::GenericPredicates { - predicates: self.predicates.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.predicates.visit_with(visitor) - } -} - impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { match *self { ty::Predicate::Trait(ref a) => ty::Predicate::Trait(a.fold_with(folder)), - ty::Predicate::Rfc1592(ref a) => - ty::Predicate::Rfc1592(a.fold_with(folder)), ty::Predicate::Equate(ref binder) => ty::Predicate::Equate(binder.fold_with(folder)), ty::Predicate::RegionOutlives(ref binder) => @@ -868,7 +794,6 @@ impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> { fn super_visit_with>(&self, visitor: &mut V) -> bool { match *self { ty::Predicate::Trait(ref a) => a.visit_with(visitor), - ty::Predicate::Rfc1592(ref a) => a.visit_with(visitor), ty::Predicate::Equate(ref binder) => binder.visit_with(visitor), ty::Predicate::RegionOutlives(ref binder) => binder.visit_with(visitor), ty::Predicate::TypeOutlives(ref binder) => binder.visit_with(visitor), @@ -893,6 +818,20 @@ impl<'tcx> TypeFoldable<'tcx> for ty::ProjectionPredicate<'tcx> { } } +impl<'tcx> TypeFoldable<'tcx> for ty::ExistentialProjection<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::ExistentialProjection { + trait_ref: self.trait_ref.fold_with(folder), + item_name: self.item_name, + ty: self.ty.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.trait_ref.visit_with(visitor) || self.ty.visit_with(visitor) + } +} + impl<'tcx> TypeFoldable<'tcx> for ty::ProjectionTy<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { ty::ProjectionTy { @@ -969,36 +908,6 @@ impl<'tcx> TypeFoldable<'tcx> for ty::ClosureUpvar<'tcx> { } } -impl<'tcx> TypeFoldable<'tcx> for ty::ParameterEnvironment<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::ParameterEnvironment { - free_substs: self.free_substs.fold_with(folder), - implicit_region_bound: self.implicit_region_bound.fold_with(folder), - caller_bounds: self.caller_bounds.fold_with(folder), - free_id_outlive: self.free_id_outlive, - } - } - - fn 
super_visit_with>(&self, visitor: &mut V) -> bool { - self.free_substs.visit_with(visitor) || - self.implicit_region_bound.visit_with(visitor) || - self.caller_bounds.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::TypeScheme<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::TypeScheme { - generics: self.generics.fold_with(folder), - ty: self.ty.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.generics.visit_with(visitor) || self.ty.visit_with(visitor) - } -} - impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::error::ExpectedFound { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { ty::error::ExpectedFound { diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs index 9680632ec4..302cab0446 100644 --- a/src/librustc/ty/sty.rs +++ b/src/librustc/ty/sty.rs @@ -10,29 +10,28 @@ //! This module contains TypeVariants and its major components -use middle::cstore; use hir::def_id::DefId; use middle::region; -use ty::subst::{self, Substs}; -use ty::{self, AdtDef, ToPredicate, TypeFlags, Ty, TyCtxt, TyS, TypeFoldable}; +use ty::subst::Substs; +use ty::{self, AdtDef, ToPredicate, TypeFlags, Ty, TyCtxt, TypeFoldable}; +use ty::{Slice, TyS}; use util::common::ErrorReported; use collections::enum_set::{self, EnumSet, CLike}; use std::fmt; use std::ops; -use std::mem; use syntax::abi; use syntax::ast::{self, Name}; -use syntax::parse::token::keywords; +use syntax::parse::token::{keywords, InternedString}; -use serialize::{Decodable, Decoder, Encodable, Encoder}; +use serialize; use hir; use self::InferTy::*; use self::TypeVariants::*; -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct TypeAndMut<'tcx> { pub ty: Ty<'tcx>, pub mutbl: hir::Mutability, @@ -88,7 +87,7 @@ pub enum Issue32330 { // NB: If you change this, you'll probably want to change the corresponding // AST structure in libsyntax/ast.rs as well. -#[derive(Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] pub enum TypeVariants<'tcx> { /// The primitive boolean type. Written as `bool`. TyBool, @@ -106,19 +105,13 @@ pub enum TypeVariants<'tcx> { /// A primitive floating-point type. For example, `f64`. TyFloat(ast::FloatTy), - /// An enumerated type, defined with `enum`. + /// Structures, enumerations and unions. /// /// Substs here, possibly against intuition, *may* contain `TyParam`s. /// That is, even after substitution it is possible that there are type - /// variables. This happens when the `TyEnum` corresponds to an enum - /// definition and not a concrete use of it. This is true for `TyStruct` - /// as well. - TyEnum(AdtDef<'tcx>, &'tcx Substs<'tcx>), - - /// A structure type, defined with `struct`. - /// - /// See warning about substitutions for enumerated types. - TyStruct(AdtDef<'tcx>, &'tcx Substs<'tcx>), + /// variables. This happens when the `TyAdt` corresponds to an ADT + /// definition and not a concrete use of it. + TyAdt(AdtDef<'tcx>, &'tcx Substs<'tcx>), /// `Box`; this is nominally a struct in the documentation, but is /// special-cased internally. For example, it is possible to implicitly @@ -152,7 +145,7 @@ pub enum TypeVariants<'tcx> { TyFnPtr(&'tcx BareFnTy<'tcx>), /// A trait, defined with `trait`. - TyTrait(Box>), + TyTrait(Box>), /// The anonymous type of a closure. 
Used to represent the type of /// `|a| a`. @@ -162,7 +155,7 @@ pub enum TypeVariants<'tcx> { TyNever, /// A tuple type. For example, `(i32, bool)`. - TyTuple(&'tcx [Ty<'tcx>]), + TyTuple(&'tcx Slice>), /// The projection of an associated type. For example, /// `>::N`. @@ -259,7 +252,7 @@ pub enum TypeVariants<'tcx> { /// closure C wind up influencing the decisions we ought to make for /// closure C (which would then require fixed point iteration to /// handle). Plus it fixes an ICE. :P -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct ClosureSubsts<'tcx> { /// Lifetime and type parameters from the enclosing function. /// These are separated out because trans wants to pass them around @@ -269,79 +262,15 @@ pub struct ClosureSubsts<'tcx> { /// The types of the upvars. The list parallels the freevars and /// `upvar_borrows` lists. These are kept distinct so that we can /// easily index into them. - pub upvar_tys: &'tcx [Ty<'tcx>] -} - -impl<'tcx> Encodable for ClosureSubsts<'tcx> { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { - (self.func_substs, self.upvar_tys).encode(s) - } + pub upvar_tys: &'tcx Slice> } -impl<'tcx> Decodable for ClosureSubsts<'tcx> { - fn decode(d: &mut D) -> Result, D::Error> { - let (func_substs, upvar_tys) = Decodable::decode(d)?; - cstore::tls::with_decoding_context(d, |dcx, _| { - Ok(ClosureSubsts { - func_substs: func_substs, - upvar_tys: dcx.tcx().mk_type_list(upvar_tys) - }) - }) - } -} - -#[derive(Clone, PartialEq, Eq, Hash)] -pub struct TraitTy<'tcx> { - pub principal: ty::PolyTraitRef<'tcx>, - pub bounds: ExistentialBounds<'tcx>, -} - -impl<'a, 'gcx, 'tcx> TraitTy<'tcx> { - pub fn principal_def_id(&self) -> DefId { - self.principal.0.def_id - } - - /// Object types don't have a self-type specified. Therefore, when - /// we convert the principal trait-ref into a normal trait-ref, - /// you must give *some* self-type. A common choice is `mk_err()` - /// or some skolemized type. - pub fn principal_trait_ref_with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, - self_ty: Ty<'tcx>) - -> ty::PolyTraitRef<'tcx> - { - // otherwise the escaping regions would be captured by the binder - assert!(!self_ty.has_escaping_regions()); - - ty::Binder(TraitRef { - def_id: self.principal.0.def_id, - substs: tcx.mk_substs(self.principal.0.substs.with_self_ty(self_ty)), - }) - } - - pub fn projection_bounds_with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, - self_ty: Ty<'tcx>) - -> Vec> - { - // otherwise the escaping regions would be captured by the binders - assert!(!self_ty.has_escaping_regions()); - - self.bounds.projection_bounds.iter() - .map(|in_poly_projection_predicate| { - let in_projection_ty = &in_poly_projection_predicate.0.projection_ty; - let substs = tcx.mk_substs(in_projection_ty.trait_ref.substs.with_self_ty(self_ty)); - let trait_ref = ty::TraitRef::new(in_projection_ty.trait_ref.def_id, - substs); - let projection_ty = ty::ProjectionTy { - trait_ref: trait_ref, - item_name: in_projection_ty.item_name - }; - ty::Binder(ty::ProjectionPredicate { - projection_ty: projection_ty, - ty: in_poly_projection_predicate.0.ty - }) - }) - .collect() - } +#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct TraitObject<'tcx> { + pub principal: PolyExistentialTraitRef<'tcx>, + pub region_bound: &'tcx ty::Region, + pub builtin_bounds: BuiltinBounds, + pub projection_bounds: Vec>, } /// A complete reference to a trait. 
These take numerous guises in syntax, @@ -350,8 +279,8 @@ impl<'a, 'gcx, 'tcx> TraitTy<'tcx> { /// T : Foo /// /// This would be represented by a trait-reference where the def-id is the -/// def-id for the trait `Foo` and the substs defines `T` as parameter 0 in the -/// `SelfSpace` and `U` as parameter 0 in the `TypeSpace`. +/// def-id for the trait `Foo` and the substs define `T` as parameter 0, +/// and `U` as parameter 1. /// /// Trait references also appear in object types like `Foo`, but in /// that case the `Self` parameter is absent from the substitutions. @@ -359,7 +288,7 @@ impl<'a, 'gcx, 'tcx> TraitTy<'tcx> { /// Note that a `TraitRef` introduces a level of region binding, to /// account for higher-ranked trait bounds like `T : for<'a> Foo<&'a /// U>` or higher-ranked object types. -#[derive(Copy, Clone, PartialEq, Eq, Hash)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct TraitRef<'tcx> { pub def_id: DefId, pub substs: &'tcx Substs<'tcx>, @@ -381,7 +310,7 @@ impl<'tcx> PolyTraitRef<'tcx> { self.0.substs } - pub fn input_types(&self) -> &[Ty<'tcx>] { + pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator> + 'a { // FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<> self.0.input_types() } @@ -392,6 +321,42 @@ impl<'tcx> PolyTraitRef<'tcx> { } } +/// An existential reference to a trait, where `Self` is erased. +/// For example, the trait object `Trait<'a, 'b, X, Y>` is: +/// +/// exists T. T: Trait<'a, 'b, X, Y> +/// +/// The substitutions don't include the erased `Self`, only trait +/// type and lifetime parameters (`[X, Y]` and `['a, 'b]` above). +#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct ExistentialTraitRef<'tcx> { + pub def_id: DefId, + pub substs: &'tcx Substs<'tcx>, +} + +impl<'tcx> ExistentialTraitRef<'tcx> { + pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator> + 'a { + // Select only the "input types" from a trait-reference. For + // now this is all the types that appear in the + // trait-reference, but it should eventually exclude + // associated types. + self.substs.types() + } +} + +pub type PolyExistentialTraitRef<'tcx> = Binder>; + +impl<'tcx> PolyExistentialTraitRef<'tcx> { + pub fn def_id(&self) -> DefId { + self.0.def_id + } + + pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator> + 'a { + // FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<> + self.0.input_types() + } +} + /// Binder is a binder for higher-ranked lifetimes. It is part of the /// compiler's representation for things like `for<'a> Fn(&'a isize)` /// (which would be represented by the type `PolyTraitRef == @@ -399,7 +364,7 @@ impl<'tcx> PolyTraitRef<'tcx> { /// erase, or otherwise "discharge" these bound regions, we change the /// type from `Binder` to just `T` (see /// e.g. `liberate_late_bound_regions`). -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct Binder(pub T); impl Binder { @@ -447,7 +412,7 @@ impl fmt::Debug for TypeFlags { /// Represents the projection of an associated type. In explicit UFCS /// form this would be written `>::N`. -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct ProjectionTy<'tcx> { /// The trait reference `T as Trait<..>`. 
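The `ExistentialTraitRef` introduced above is a trait reference with `Self` erased; a full `TraitRef` is recovered later by supplying a self type (see `with_self_ty` further down). A toy model of that reconstruction, using strings in place of real types and purely for illustration:

#[derive(Clone, Debug, PartialEq)]
struct ExistentialTraitRef {
    trait_name: String,
    args: Vec<String>, // type/lifetime arguments, *without* Self
}

#[derive(Debug, PartialEq)]
struct TraitRef {
    trait_name: String,
    args: Vec<String>, // args[0] is the Self type
}

impl ExistentialTraitRef {
    // Rebuild a complete trait reference by supplying the erased Self type.
    fn with_self_ty(&self, self_ty: &str) -> TraitRef {
        let mut args = Vec::with_capacity(self.args.len() + 1);
        args.push(self_ty.to_string());
        args.extend(self.args.iter().cloned());
        TraitRef { trait_name: self.trait_name.clone(), args }
    }
}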
pub trait_ref: ty::TraitRef<'tcx>, @@ -456,20 +421,16 @@ pub struct ProjectionTy<'tcx> { pub item_name: Name, } -impl<'tcx> ProjectionTy<'tcx> { - pub fn sort_key(&self) -> (DefId, Name) { - (self.trait_ref.def_id, self.item_name) - } -} - -#[derive(Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct BareFnTy<'tcx> { pub unsafety: hir::Unsafety, pub abi: abi::Abi, pub sig: PolyFnSig<'tcx>, } -#[derive(Clone, PartialEq, Eq, Hash)] +impl<'tcx> serialize::UseSpecializedDecodable for &'tcx BareFnTy<'tcx> {} + +#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct ClosureTy<'tcx> { pub unsafety: hir::Unsafety, pub abi: abi::Abi, @@ -482,7 +443,7 @@ pub struct ClosureTy<'tcx> { /// - `inputs` is the list of arguments and their modes. /// - `output` is the return type. /// - `variadic` indicates whether this is a variadic function. (only true for foreign fns) -#[derive(Clone, PartialEq, Eq, Hash)] +#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct FnSig<'tcx> { pub inputs: Vec>, pub output: Ty<'tcx>, @@ -506,35 +467,36 @@ impl<'tcx> PolyFnSig<'tcx> { } } -#[derive(Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct ParamTy { - pub space: subst::ParamSpace, pub idx: u32, pub name: Name, } impl<'a, 'gcx, 'tcx> ParamTy { - pub fn new(space: subst::ParamSpace, - index: u32, - name: Name) - -> ParamTy { - ParamTy { space: space, idx: index, name: name } + pub fn new(index: u32, name: Name) -> ParamTy { + ParamTy { idx: index, name: name } } pub fn for_self() -> ParamTy { - ParamTy::new(subst::SelfSpace, 0, keywords::SelfType.name()) + ParamTy::new(0, keywords::SelfType.name()) } pub fn for_def(def: &ty::TypeParameterDef) -> ParamTy { - ParamTy::new(def.space, def.index, def.name) + ParamTy::new(def.index, def.name) } pub fn to_ty(self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { - tcx.mk_param(self.space, self.idx, self.name) + tcx.mk_param(self.idx, self.name) } pub fn is_self(&self) -> bool { - self.space == subst::SelfSpace && self.idx == 0 + if self.name == keywords::SelfType.name() { + assert_eq!(self.idx, 0); + true + } else { + false + } } } @@ -684,24 +646,25 @@ pub enum Region { ReErased, } +impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Region {} + #[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)] pub struct EarlyBoundRegion { - pub space: subst::ParamSpace, pub index: u32, pub name: Name, } -#[derive(Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct TyVid { pub index: u32, } -#[derive(Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct IntVid { pub index: u32 } -#[derive(Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct FloatVid { pub index: u32 } @@ -716,7 +679,7 @@ pub struct SkolemizedRegionVid { pub index: u32 } -#[derive(Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub enum InferTy { TyVar(TyVid), IntVar(IntVid), @@ -730,31 +693,53 @@ pub enum InferTy { FreshFloatTy(u32) } -/// Bounds suitable for an existentially quantified type parameter -/// such as those that appear in object types or closure types. 
-#[derive(PartialEq, Eq, Hash, Clone)] -pub struct ExistentialBounds<'tcx> { - pub region_bound: ty::Region, - pub builtin_bounds: BuiltinBounds, - pub projection_bounds: Vec>, +/// A `ProjectionPredicate` for an `ExistentialTraitRef`. +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct ExistentialProjection<'tcx> { + pub trait_ref: ExistentialTraitRef<'tcx>, + pub item_name: Name, + pub ty: Ty<'tcx> } -impl<'tcx> ExistentialBounds<'tcx> { - pub fn new(region_bound: ty::Region, - builtin_bounds: BuiltinBounds, - projection_bounds: Vec>) - -> Self { - let mut projection_bounds = projection_bounds; - projection_bounds.sort_by(|a, b| a.sort_key().cmp(&b.sort_key())); - ExistentialBounds { - region_bound: region_bound, - builtin_bounds: builtin_bounds, - projection_bounds: projection_bounds - } +pub type PolyExistentialProjection<'tcx> = Binder>; + +impl<'a, 'tcx, 'gcx> PolyExistentialProjection<'tcx> { + pub fn item_name(&self) -> Name { + self.0.item_name // safe to skip the binder to access a name + } + + pub fn sort_key(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> (u64, InternedString) { + // We want something here that is stable across crate boundaries. + // The DefId isn't but the `deterministic_hash` of the corresponding + // DefPath is. + let trait_def = tcx.lookup_trait_def(self.0.trait_ref.def_id); + let def_path_hash = trait_def.def_path_hash; + + // An `ast::Name` is also not stable (it's just an index into an + // interning table), so map to the corresponding `InternedString`. + let item_name = self.0.item_name.as_str(); + (def_path_hash, item_name) + } + + pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + self_ty: Ty<'tcx>) + -> ty::PolyProjectionPredicate<'tcx> + { + // otherwise the escaping regions would be captured by the binders + assert!(!self_ty.has_escaping_regions()); + + let trait_ref = self.map_bound(|proj| proj.trait_ref); + self.map_bound(|proj| ty::ProjectionPredicate { + projection_ty: ty::ProjectionTy { + trait_ref: trait_ref.with_self_ty(tcx, self_ty).0, + item_name: proj.item_name + }, + ty: proj.ty + }) } } -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct BuiltinBounds(EnumSet); impl<'a, 'gcx, 'tcx> BuiltinBounds { @@ -797,12 +782,11 @@ impl<'a> IntoIterator for &'a BuiltinBounds { #[derive(Clone, RustcEncodable, PartialEq, Eq, RustcDecodable, Hash, Debug, Copy)] -#[repr(usize)] pub enum BuiltinBound { - Send, - Sized, - Copy, - Sync, + Send = 0, + Sized = 1, + Copy = 2, + Sync = 3, } impl CLike for BuiltinBound { @@ -810,7 +794,13 @@ impl CLike for BuiltinBound { *self as usize } fn from_usize(v: usize) -> BuiltinBound { - unsafe { mem::transmute(v) } + match v { + 0 => BuiltinBound::Send, + 1 => BuiltinBound::Sized, + 2 => BuiltinBound::Copy, + 3 => BuiltinBound::Sync, + _ => bug!("{} is not a valid BuiltinBound", v) + } } } @@ -905,7 +895,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { // FIXME(#24885): be smarter here, the AdtDefData::is_empty method could easily be made // more complete. match self.sty { - TyEnum(def, _) | TyStruct(def, _) => def.is_empty(), + TyAdt(def, _) => def.is_empty(), // FIXME(canndrew): There's no reason why these can't be uncommented, they're tested // and they don't break anything. But I'm keeping my changes small for now. 
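The `CLike::from_usize` change above swaps an unchecked `mem::transmute` for an exhaustive `match` over the explicit discriminants. The same pattern in standalone form (returning `Option` here where the compiler calls `bug!`):

#[derive(Clone, Copy, PartialEq, Debug)]
enum BuiltinBound { Send = 0, Sized = 1, Copy = 2, Sync = 3 }

fn to_usize(b: BuiltinBound) -> usize {
    b as usize
}

fn from_usize(v: usize) -> Option<BuiltinBound> {
    match v {
        0 => Some(BuiltinBound::Send),
        1 => Some(BuiltinBound::Sized),
        2 => Some(BuiltinBound::Copy),
        3 => Some(BuiltinBound::Sync),
        _ => None, // the compiler's version treats this as a bug
    }
}

fn main() {
    assert_eq!(from_usize(to_usize(BuiltinBound::Copy)), Some(BuiltinBound::Copy));
    assert_eq!(from_usize(7), None);
}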
@@ -933,7 +923,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } pub fn is_phantom_data(&self) -> bool { - if let TyStruct(def, _) = self.sty { + if let TyAdt(def, _) = self.sty { def.is_phantom_data() } else { false @@ -942,16 +932,16 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { pub fn is_bool(&self) -> bool { self.sty == TyBool } - pub fn is_param(&self, space: subst::ParamSpace, index: u32) -> bool { + pub fn is_param(&self, index: u32) -> bool { match self.sty { - ty::TyParam(ref data) => data.space == space && data.idx == index, + ty::TyParam(ref data) => data.idx == index, _ => false, } } pub fn is_self(&self) -> bool { match self.sty { - TyParam(ref p) => p.space == subst::SelfSpace, + TyParam(ref p) => p.is_self(), _ => false } } @@ -968,8 +958,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { pub fn is_structural(&self) -> bool { match self.sty { - TyStruct(..) | TyTuple(_) | TyEnum(..) | - TyArray(..) | TyClosure(..) => true, + TyAdt(..) | TyTuple(..) | TyArray(..) | TyClosure(..) => true, _ => self.is_slice() | self.is_trait() } } @@ -977,7 +966,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { #[inline] pub fn is_simd(&self) -> bool { match self.sty { - TyStruct(def, _) => def.is_simd(), + TyAdt(def, _) => def.is_simd(), _ => false } } @@ -992,7 +981,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { pub fn simd_type(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { match self.sty { - TyStruct(def, substs) => { + TyAdt(def, substs) => { def.struct_variant().fields[0].ty(tcx, substs) } _ => bug!("simd_type called on invalid type") @@ -1001,7 +990,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { pub fn simd_size(&self, _cx: TyCtxt) -> usize { match self.sty { - TyStruct(def, _) => def.struct_variant().fields.len(), + TyAdt(def, _) => def.struct_variant().fields.len(), _ => bug!("simd_size called on invalid type") } } @@ -1154,7 +1143,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { pub fn fn_sig(&self) -> &'tcx PolyFnSig<'tcx> { match self.sty { - TyFnDef(_, _, ref f) | TyFnPtr(ref f) => &f.sig, + TyFnDef(.., ref f) | TyFnPtr(ref f) => &f.sig, _ => bug!("Ty::fn_sig() called on non-fn type: {:?}", self) } } @@ -1162,7 +1151,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { /// Returns the ABI of the given function. pub fn fn_abi(&self) -> abi::Abi { match self.sty { - TyFnDef(_, _, ref f) | TyFnPtr(ref f) => f.abi, + TyFnDef(.., ref f) | TyFnPtr(ref f) => f.abi, _ => bug!("Ty::fn_abi() called on non-fn type"), } } @@ -1185,9 +1174,8 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { pub fn ty_to_def_id(&self) -> Option { match self.sty { - TyTrait(ref tt) => Some(tt.principal_def_id()), - TyStruct(def, _) | - TyEnum(def, _) => Some(def.did), + TyTrait(ref tt) => Some(tt.principal.def_id()), + TyAdt(def, _) => Some(def.did), TyClosure(id, _) => Some(id), _ => None } @@ -1195,7 +1183,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { pub fn ty_adt_def(&self) -> Option> { match self.sty { - TyStruct(adt, _) | TyEnum(adt, _) => Some(adt), + TyAdt(adt, _) => Some(adt), _ => None } } @@ -1203,27 +1191,24 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { /// Returns the regions directly referenced from this type (but /// not types reachable from this type via `walk_tys`). This /// ignores late-bound regions binders. 
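
Illustrative sketch, not from the patch: after the `TyStruct`/`TyEnum` collapse a single `TyAdt` arm covers all nominal types, and code that still cares about the flavor branches on the ADT kind. The types below are simplified stand-ins for `AdtDef`/`AdtKind`.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum AdtKind { Struct, Union, Enum }

struct AdtDef {
    kind: AdtKind,
    variants: usize,
}

fn is_empty(def: &AdtDef) -> bool {
    match def.kind {
        // A struct or union always has exactly one variant, so it has values.
        AdtKind::Struct | AdtKind::Union => false,
        // An enum with no variants has no values at all.
        AdtKind::Enum => def.variants == 0,
    }
}

fn main() {
    assert!(is_empty(&AdtDef { kind: AdtKind::Enum, variants: 0 }));
    assert!(!is_empty(&AdtDef { kind: AdtKind::Struct, variants: 1 }));
}
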
- pub fn regions(&self) -> Vec { + pub fn regions(&self) -> Vec<&'tcx ty::Region> { match self.sty { TyRef(region, _) => { - vec![*region] + vec![region] } TyTrait(ref obj) => { - let mut v = vec![obj.bounds.region_bound]; - v.extend_from_slice(obj.principal.skip_binder() - .substs.regions.as_slice()); + let mut v = vec![obj.region_bound]; + v.extend(obj.principal.skip_binder().substs.regions()); v } - TyEnum(_, substs) | - TyStruct(_, substs) | - TyAnon(_, substs) => { - substs.regions.as_slice().to_vec() + TyAdt(_, substs) | TyAnon(_, substs) => { + substs.regions().collect() } TyClosure(_, ref substs) => { - substs.func_substs.regions.as_slice().to_vec() + substs.func_substs.regions().collect() } TyProjection(ref data) => { - data.trait_ref.substs.regions.as_slice().to_vec() + data.trait_ref.substs.regions().collect() } TyFnDef(..) | TyFnPtr(_) | @@ -1234,7 +1219,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { TyFloat(_) | TyBox(_) | TyStr | - TyArray(_, _) | + TyArray(..) | TySlice(_) | TyRawPtr(_) | TyNever | diff --git a/src/librustc/ty/subst.rs b/src/librustc/ty/subst.rs index 595d965ffc..6911d21742 100644 --- a/src/librustc/ty/subst.rs +++ b/src/librustc/ty/subst.rs @@ -10,518 +10,326 @@ // Type substitutions. -pub use self::ParamSpace::*; - -use middle::cstore; use hir::def_id::DefId; use ty::{self, Ty, TyCtxt}; -use ty::fold::{TypeFoldable, TypeFolder}; +use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; -use serialize::{Encodable, Encoder, Decodable, Decoder}; -use std::fmt; -use std::iter::IntoIterator; -use std::slice::Iter; -use std::vec::{Vec, IntoIter}; +use serialize::{self, Encodable, Encoder, Decodable, Decoder}; use syntax_pos::{Span, DUMMY_SP}; -/////////////////////////////////////////////////////////////////////////// - -/// A substitution mapping type/region parameters to new values. We -/// identify each in-scope parameter by an *index* and a *parameter -/// space* (which indices where the parameter is defined; see -/// `ParamSpace`). -#[derive(Clone, PartialEq, Eq, Hash)] -pub struct Substs<'tcx> { - pub types: VecPerParamSpace>, - pub regions: VecPerParamSpace, +use core::nonzero::NonZero; +use std::fmt; +use std::iter; +use std::marker::PhantomData; +use std::mem; + +/// An entity in the Rust typesystem, which can be one of +/// several kinds (only types and lifetimes for now). +/// To reduce memory usage, a `Kind` is a interned pointer, +/// with the lowest 2 bits being reserved for a tag to +/// indicate the type (`Ty` or `Region`) it points to. +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +pub struct Kind<'tcx> { + ptr: NonZero, + marker: PhantomData<(Ty<'tcx>, &'tcx ty::Region)> } -impl<'a, 'gcx, 'tcx> Substs<'tcx> { - pub fn new(t: VecPerParamSpace>, - r: VecPerParamSpace) - -> Substs<'tcx> - { - Substs { types: t, regions: r } - } - - pub fn new_type(t: Vec>, - r: Vec) - -> Substs<'tcx> - { - Substs::new(VecPerParamSpace::new(t, Vec::new(), Vec::new()), - VecPerParamSpace::new(r, Vec::new(), Vec::new())) - } - - pub fn new_trait(t: Vec>, - r: Vec, - s: Ty<'tcx>) - -> Substs<'tcx> - { - Substs::new(VecPerParamSpace::new(t, vec!(s), Vec::new()), - VecPerParamSpace::new(r, Vec::new(), Vec::new())) - } - - pub fn empty() -> Substs<'tcx> { - Substs { - types: VecPerParamSpace::empty(), - regions: VecPerParamSpace::empty(), +const TAG_MASK: usize = 0b11; +const TYPE_TAG: usize = 0b00; +const REGION_TAG: usize = 0b01; + +impl<'tcx> From> for Kind<'tcx> { + fn from(ty: Ty<'tcx>) -> Kind<'tcx> { + // Ensure we can use the tag bits. 
+ assert_eq!(mem::align_of_val(ty) & TAG_MASK, 0); + + let ptr = ty as *const _ as usize; + Kind { + ptr: unsafe { + NonZero::new(ptr | TYPE_TAG) + }, + marker: PhantomData } } - - pub fn is_noop(&self) -> bool { - self.regions.is_empty() && self.types.is_empty() - } - - pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> { - *self.types.get(ty_param_def.space, ty_param_def.index as usize) - } - - pub fn self_ty(&self) -> Option> { - self.types.get_self().cloned() - } - - pub fn with_self_ty(&self, self_ty: Ty<'tcx>) -> Substs<'tcx> { - assert!(self.self_ty().is_none()); - let mut s = (*self).clone(); - s.types.push(SelfSpace, self_ty); - s - } - - pub fn erase_regions(self) -> Substs<'tcx> { - let Substs { types, regions } = self; - let regions = regions.map(|_| ty::ReErased); - Substs { types: types, regions: regions } - } - - pub fn with_method(self, - m_types: Vec>, - m_regions: Vec) - -> Substs<'tcx> - { - let Substs { types, regions } = self; - let types = types.with_slice(FnSpace, &m_types); - let regions = regions.with_slice(FnSpace, &m_regions); - Substs { types: types, regions: regions } - } - - pub fn with_method_from(&self, - meth_substs: &Substs<'tcx>) - -> Substs<'tcx> - { - let Substs { types, regions } = self.clone(); - let types = types.with_slice(FnSpace, meth_substs.types.get_slice(FnSpace)); - let regions = regions.with_slice(FnSpace, meth_substs.regions.get_slice(FnSpace)); - Substs { types: types, regions: regions } - } - - pub fn with_method_from_subst(&self, other: &Substs<'tcx>) -> Substs<'tcx> { - let Substs { types, regions } = self.clone(); - let types = types.with_slice(FnSpace, other.types.get_slice(FnSpace)); - let regions = regions.with_slice(FnSpace, other.regions.get_slice(FnSpace)); - Substs { types: types, regions: regions } - } - - /// Creates a trait-ref out of this substs, ignoring the FnSpace substs - pub fn to_trait_ref(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, trait_id: DefId) - -> ty::TraitRef<'tcx> { - let Substs { mut types, mut regions } = self.clone(); - types.truncate(FnSpace, 0); - regions.truncate(FnSpace, 0); - - ty::TraitRef { - def_id: trait_id, - substs: tcx.mk_substs(Substs { types: types, regions: regions }) - } - } -} - -impl<'tcx> Encodable for Substs<'tcx> { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { - cstore::tls::with_encoding_context(s, |ecx, rbml_w| { - ecx.encode_substs(rbml_w, self); - Ok(()) - }) - } -} - -impl<'tcx> Decodable for Substs<'tcx> { - fn decode(d: &mut D) -> Result, D::Error> { - cstore::tls::with_decoding_context(d, |dcx, rbml_r| { - Ok(dcx.decode_substs(rbml_r)) - }) - } -} - -impl<'tcx> Decodable for &'tcx Substs<'tcx> { - fn decode(d: &mut D) -> Result<&'tcx Substs<'tcx>, D::Error> { - let substs = cstore::tls::with_decoding_context(d, |dcx, rbml_r| { - let substs = dcx.decode_substs(rbml_r); - dcx.tcx().mk_substs(substs) - }); - - Ok(substs) - } } -/////////////////////////////////////////////////////////////////////////// -// ParamSpace - -#[derive(PartialOrd, Ord, PartialEq, Eq, Copy, - Clone, Hash, RustcEncodable, RustcDecodable, Debug)] -pub enum ParamSpace { - TypeSpace, // Type parameters attached to a type definition, trait, or impl - SelfSpace, // Self parameter on a trait - FnSpace, // Type parameters attached to a method or fn -} - -impl ParamSpace { - pub fn all() -> [ParamSpace; 3] { - [TypeSpace, SelfSpace, FnSpace] - } - - pub fn to_uint(self) -> usize { - match self { - TypeSpace => 0, - SelfSpace => 1, - FnSpace => 2, - } - } - - pub fn from_uint(u: usize) -> 
ParamSpace { - match u { - 0 => TypeSpace, - 1 => SelfSpace, - 2 => FnSpace, - _ => bug!("Invalid ParamSpace: {}", u) +impl<'tcx> From<&'tcx ty::Region> for Kind<'tcx> { + fn from(r: &'tcx ty::Region) -> Kind<'tcx> { + // Ensure we can use the tag bits. + assert_eq!(mem::align_of_val(r) & TAG_MASK, 0); + + let ptr = r as *const _ as usize; + Kind { + ptr: unsafe { + NonZero::new(ptr | REGION_TAG) + }, + marker: PhantomData } } } -/// Vector of things sorted by param space. Used to keep -/// the set of things declared on the type, self, or method -/// distinct. -#[derive(PartialEq, Eq, Clone, Hash, RustcEncodable, RustcDecodable)] -pub struct VecPerParamSpace { - // This was originally represented as a tuple with one Vec for - // each variant of ParamSpace, and that remains the abstraction - // that it provides to its clients. - // - // Here is how the representation corresponds to the abstraction - // i.e. the "abstraction function" AF: - // - // AF(self) = (self.content[..self.type_limit], - // self.content[self.type_limit..self.self_limit], - // self.content[self.self_limit..]) - type_limit: usize, - self_limit: usize, - content: Vec, -} - -/// The `split` function converts one `VecPerParamSpace` into this -/// `SeparateVecsPerParamSpace` structure. -pub struct SeparateVecsPerParamSpace { - pub types: Vec, - pub selfs: Vec, - pub fns: Vec, -} - -impl fmt::Debug for VecPerParamSpace { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "[{:?};{:?};{:?}]", - self.get_slice(TypeSpace), - self.get_slice(SelfSpace), - self.get_slice(FnSpace)) - } -} - -impl VecPerParamSpace { - fn limits(&self, space: ParamSpace) -> (usize, usize) { - match space { - TypeSpace => (0, self.type_limit), - SelfSpace => (self.type_limit, self.self_limit), - FnSpace => (self.self_limit, self.content.len()), +impl<'tcx> Kind<'tcx> { + #[inline] + unsafe fn downcast(self, tag: usize) -> Option<&'tcx T> { + let ptr = *self.ptr; + if ptr & TAG_MASK == tag { + Some(&*((ptr & !TAG_MASK) as *const _)) + } else { + None } } - pub fn empty() -> VecPerParamSpace { - VecPerParamSpace { - type_limit: 0, - self_limit: 0, - content: Vec::new() + #[inline] + pub fn as_type(self) -> Option> { + unsafe { + self.downcast(TYPE_TAG) } } - /// `t` is the type space. - /// `s` is the self space. - /// `f` is the fn space. - pub fn new(t: Vec, s: Vec, f: Vec) -> VecPerParamSpace { - let type_limit = t.len(); - let self_limit = type_limit + s.len(); - - let mut content = t; - content.extend(s); - content.extend(f); - - VecPerParamSpace { - type_limit: type_limit, - self_limit: self_limit, - content: content, + #[inline] + pub fn as_region(self) -> Option<&'tcx ty::Region> { + unsafe { + self.downcast(REGION_TAG) } } +} - fn new_internal(content: Vec, type_limit: usize, self_limit: usize) - -> VecPerParamSpace - { - VecPerParamSpace { - type_limit: type_limit, - self_limit: self_limit, - content: content, +impl<'tcx> fmt::Debug for Kind<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if let Some(ty) = self.as_type() { + write!(f, "{:?}", ty) + } else if let Some(r) = self.as_region() { + write!(f, "{:?}", r) + } else { + write!(f, "", *self.ptr as *const ()) } } +} - /// Appends `value` to the vector associated with `space`. - /// - /// Unlike the `push` method in `Vec`, this should not be assumed - /// to be a cheap operation (even when amortized over many calls). 
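
Illustrative sketch of the tagging scheme described above, not from the patch: because the pointees are aligned to more than two bytes, the two low bits of their addresses are always zero and can carry a tag saying which kind of entity is stored. Plain `u64` statics stand in for interned `Ty` and `Region` values.

const TAG_MASK: usize = 0b11;
const TYPE_TAG: usize = 0b00;
const REGION_TAG: usize = 0b01;

#[derive(Clone, Copy)]
struct Packed(usize);

fn pack_type(t: &'static u64) -> Packed {
    let ptr = t as *const u64 as usize;
    assert_eq!(ptr & TAG_MASK, 0); // alignment guarantees the low bits are free
    Packed(ptr | TYPE_TAG)
}

fn pack_region(r: &'static u64) -> Packed {
    let ptr = r as *const u64 as usize;
    assert_eq!(ptr & TAG_MASK, 0);
    Packed(ptr | REGION_TAG)
}

fn as_type(p: Packed) -> Option<&'static u64> {
    if p.0 & TAG_MASK == TYPE_TAG {
        // Strip the tag bits before dereferencing the original pointer.
        Some(unsafe { &*((p.0 & !TAG_MASK) as *const u64) })
    } else {
        None
    }
}

static TY: u64 = 1;
static RE: u64 = 2;

fn main() {
    assert_eq!(as_type(pack_type(&TY)), Some(&TY));
    assert_eq!(as_type(pack_region(&RE)), None);
}

Keeping the tag in otherwise-unused alignment bits is what lets a substitution list store types and regions in one homogeneous vector without an extra discriminant word per entry.
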
- pub fn push(&mut self, space: ParamSpace, value: T) { - let (_, limit) = self.limits(space); - match space { - TypeSpace => { self.type_limit += 1; self.self_limit += 1; } - SelfSpace => { self.self_limit += 1; } - FnSpace => { } +impl<'tcx> TypeFoldable<'tcx> for Kind<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + if let Some(ty) = self.as_type() { + Kind::from(ty.fold_with(folder)) + } else if let Some(r) = self.as_region() { + Kind::from(r.fold_with(folder)) + } else { + bug!() } - self.content.insert(limit, value); } - /// Appends `values` to the vector associated with `space`. - /// - /// Unlike the `extend` method in `Vec`, this should not be assumed - /// to be a cheap operation (even when amortized over many calls). - pub fn extend>(&mut self, space: ParamSpace, values: I) { - // This could be made more efficient, obviously. - for item in values { - self.push(space, item); + fn super_visit_with>(&self, visitor: &mut V) -> bool { + if let Some(ty) = self.as_type() { + ty.visit_with(visitor) + } else if let Some(r) = self.as_region() { + r.visit_with(visitor) + } else { + bug!() } } +} - pub fn pop(&mut self, space: ParamSpace) -> Option { - let (start, limit) = self.limits(space); - if start == limit { - None - } else { - match space { - TypeSpace => { self.type_limit -= 1; self.self_limit -= 1; } - SelfSpace => { self.self_limit -= 1; } - FnSpace => {} - } - if self.content.is_empty() { - None +impl<'tcx> Encodable for Kind<'tcx> { + fn encode(&self, e: &mut E) -> Result<(), E::Error> { + e.emit_enum("Kind", |e| { + if let Some(ty) = self.as_type() { + e.emit_enum_variant("Ty", TYPE_TAG, 1, |e| { + e.emit_enum_variant_arg(0, |e| ty.encode(e)) + }) + } else if let Some(r) = self.as_region() { + e.emit_enum_variant("Region", REGION_TAG, 1, |e| { + e.emit_enum_variant_arg(0, |e| r.encode(e)) + }) } else { - Some(self.content.remove(limit - 1)) + bug!() } - } - } - - pub fn truncate(&mut self, space: ParamSpace, len: usize) { - // FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n). - while self.len(space) > len { - self.pop(space); - } - } - - pub fn replace(&mut self, space: ParamSpace, elems: Vec) { - // FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n). - self.truncate(space, 0); - for t in elems { - self.push(space, t); - } - } - - pub fn get_self<'a>(&'a self) -> Option<&'a T> { - let v = self.get_slice(SelfSpace); - assert!(v.len() <= 1); - if v.is_empty() { None } else { Some(&v[0]) } - } - - pub fn len(&self, space: ParamSpace) -> usize { - self.get_slice(space).len() - } - - pub fn is_empty_in(&self, space: ParamSpace) -> bool { - self.len(space) == 0 - } - - pub fn get_slice<'a>(&'a self, space: ParamSpace) -> &'a [T] { - let (start, limit) = self.limits(space); - &self.content[start.. limit] + }) } +} - pub fn get_mut_slice<'a>(&'a mut self, space: ParamSpace) -> &'a mut [T] { - let (start, limit) = self.limits(space); - &mut self.content[start.. 
limit] +impl<'tcx> Decodable for Kind<'tcx> { + fn decode(d: &mut D) -> Result, D::Error> { + d.read_enum("Kind", |d| { + d.read_enum_variant(&["Ty", "Region"], |d, tag| { + match tag { + TYPE_TAG => Ty::decode(d).map(Kind::from), + REGION_TAG => <&ty::Region>::decode(d).map(Kind::from), + _ => Err(d.error("invalid Kind tag")) + } + }) + }) } +} - pub fn opt_get<'a>(&'a self, - space: ParamSpace, - index: usize) - -> Option<&'a T> { - let v = self.get_slice(space); - if index < v.len() { Some(&v[index]) } else { None } - } +/// A substitution mapping type/region parameters to new values. +#[derive(Clone, PartialEq, Eq, Debug, Hash, RustcEncodable, RustcDecodable)] +pub struct Substs<'tcx> { + params: Vec> +} - pub fn get<'a>(&'a self, space: ParamSpace, index: usize) -> &'a T { - &self.get_slice(space)[index] +impl<'a, 'gcx, 'tcx> Substs<'tcx> { + pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, params: I) + -> &'tcx Substs<'tcx> + where I: IntoIterator> { + tcx.mk_substs(Substs { + params: params.into_iter().collect() + }) } - pub fn iter<'a>(&'a self) -> Iter<'a,T> { - self.content.iter() + pub fn maybe_new(tcx: TyCtxt<'a, 'gcx, 'tcx>, params: I) + -> Result<&'tcx Substs<'tcx>, E> + where I: IntoIterator, E>> { + Ok(tcx.mk_substs(Substs { + params: params.into_iter().collect::>()? + })) } - pub fn into_iter(self) -> IntoIter { - self.content.into_iter() - } + pub fn new_trait(tcx: TyCtxt<'a, 'gcx, 'tcx>, + s: Ty<'tcx>, + t: &[Ty<'tcx>]) + -> &'tcx Substs<'tcx> + { + let t = iter::once(s).chain(t.iter().cloned()); + Substs::new(tcx, t.map(Kind::from)) + } + + pub fn empty(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> &'tcx Substs<'tcx> { + Substs::new(tcx, vec![]) + } + + /// Creates a Substs for generic parameter definitions, + /// by calling closures to obtain each region and type. + /// The closures get to observe the Substs as they're + /// being built, which can be used to correctly + /// substitute defaults of type parameters. + pub fn for_item(tcx: TyCtxt<'a, 'gcx, 'tcx>, + def_id: DefId, + mut mk_region: FR, + mut mk_type: FT) + -> &'tcx Substs<'tcx> + where FR: FnMut(&ty::RegionParameterDef, &Substs<'tcx>) -> &'tcx ty::Region, + FT: FnMut(&ty::TypeParameterDef<'tcx>, &Substs<'tcx>) -> Ty<'tcx> { + let defs = tcx.lookup_generics(def_id); + let mut substs = Substs { + params: Vec::with_capacity(defs.count()) + }; - pub fn iter_enumerated<'a>(&'a self) -> EnumeratedItems<'a,T> { - EnumeratedItems::new(self) - } + substs.fill_item(tcx, defs, &mut mk_region, &mut mk_type); - pub fn as_slice(&self) -> &[T] { - &self.content + tcx.mk_substs(substs) } - pub fn into_vec(self) -> Vec { - self.content - } + fn fill_item(&mut self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + defs: &ty::Generics<'tcx>, + mk_region: &mut FR, + mk_type: &mut FT) + where FR: FnMut(&ty::RegionParameterDef, &Substs<'tcx>) -> &'tcx ty::Region, + FT: FnMut(&ty::TypeParameterDef<'tcx>, &Substs<'tcx>) -> Ty<'tcx> { + if let Some(def_id) = defs.parent { + let parent_defs = tcx.lookup_generics(def_id); + self.fill_item(tcx, parent_defs, mk_region, mk_type); + } - pub fn all_vecs
<P>
(&self, mut pred: P) -> bool where - P: FnMut(&[T]) -> bool, - { - let spaces = [TypeSpace, SelfSpace, FnSpace]; - spaces.iter().all(|&space| { pred(self.get_slice(space)) }) - } + // Handle Self first, before all regions. + let mut types = defs.types.iter(); + if defs.parent.is_none() && defs.has_self { + let def = types.next().unwrap(); + let ty = mk_type(def, self); + assert_eq!(def.index as usize, self.params.len()); + self.params.push(Kind::from(ty)); + } - pub fn all
<P>
(&self, pred: P) -> bool where P: FnMut(&T) -> bool { - self.iter().all(pred) - } + for def in &defs.regions { + let region = mk_region(def, self); + assert_eq!(def.index as usize, self.params.len()); + self.params.push(Kind::from(region)); + } - pub fn any
<P>
(&self, pred: P) -> bool where P: FnMut(&T) -> bool { - self.iter().any(pred) + for def in types { + let ty = mk_type(def, self); + assert_eq!(def.index as usize, self.params.len()); + self.params.push(Kind::from(ty)); + } } - pub fn is_empty(&self) -> bool { - self.all_vecs(|v| v.is_empty()) + pub fn is_noop(&self) -> bool { + self.params.is_empty() } - pub fn map(&self, pred: P) -> VecPerParamSpace where P: FnMut(&T) -> U { - let result = self.iter().map(pred).collect(); - VecPerParamSpace::new_internal(result, - self.type_limit, - self.self_limit) + #[inline] + pub fn params(&self) -> &[Kind<'tcx>] { + &self.params } - pub fn map_enumerated(&self, pred: P) -> VecPerParamSpace where - P: FnMut((ParamSpace, usize, &T)) -> U, - { - let result = self.iter_enumerated().map(pred).collect(); - VecPerParamSpace::new_internal(result, - self.type_limit, - self.self_limit) + #[inline] + pub fn types(&'a self) -> impl DoubleEndedIterator> + 'a { + self.params.iter().filter_map(|k| k.as_type()) } - pub fn split(self) -> SeparateVecsPerParamSpace { - let VecPerParamSpace { type_limit, self_limit, content } = self; - - let mut content_iter = content.into_iter(); - - SeparateVecsPerParamSpace { - types: content_iter.by_ref().take(type_limit).collect(), - selfs: content_iter.by_ref().take(self_limit - type_limit).collect(), - fns: content_iter.collect() - } + #[inline] + pub fn regions(&'a self) -> impl DoubleEndedIterator + 'a { + self.params.iter().filter_map(|k| k.as_region()) } - pub fn with_slice(mut self, space: ParamSpace, slice: &[T]) - -> VecPerParamSpace - where T: Clone - { - assert!(self.is_empty_in(space)); - for t in slice { - self.push(space, t.clone()); - } - - self + #[inline] + pub fn type_at(&self, i: usize) -> Ty<'tcx> { + self.params[i].as_type().unwrap_or_else(|| { + bug!("expected type for param #{} in {:?}", i, self.params); + }) } -} -#[derive(Clone)] -pub struct EnumeratedItems<'a,T:'a> { - vec: &'a VecPerParamSpace, - space_index: usize, - elem_index: usize -} - -impl<'a,T> EnumeratedItems<'a,T> { - fn new(v: &'a VecPerParamSpace) -> EnumeratedItems<'a,T> { - let mut result = EnumeratedItems { vec: v, space_index: 0, elem_index: 0 }; - result.adjust_space(); - result + #[inline] + pub fn region_at(&self, i: usize) -> &'tcx ty::Region { + self.params[i].as_region().unwrap_or_else(|| { + bug!("expected region for param #{} in {:?}", i, self.params); + }) } - fn adjust_space(&mut self) { - let spaces = ParamSpace::all(); - while - self.space_index < spaces.len() && - self.elem_index >= self.vec.len(spaces[self.space_index]) - { - self.space_index += 1; - self.elem_index = 0; - } + #[inline] + pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> { + self.type_at(ty_param_def.index as usize) + } + + #[inline] + pub fn region_for_def(&self, def: &ty::RegionParameterDef) -> &'tcx ty::Region { + self.region_at(def.index as usize) + } + + /// Transform from substitutions for a child of `source_ancestor` + /// (e.g. a trait or impl) to substitutions for the same child + /// in a different item, with `target_substs` as the base for + /// the target impl/trait, with the source child-specific + /// parameters (e.g. method parameters) on top of that base. 
+ pub fn rebase_onto(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + source_ancestor: DefId, + target_substs: &Substs<'tcx>) + -> &'tcx Substs<'tcx> { + let defs = tcx.lookup_generics(source_ancestor); + tcx.mk_substs(Substs { + params: target_substs.params.iter() + .chain(&self.params[defs.own_count()..]).cloned().collect() + }) } } -impl<'a,T> Iterator for EnumeratedItems<'a,T> { - type Item = (ParamSpace, usize, &'a T); - - fn next(&mut self) -> Option<(ParamSpace, usize, &'a T)> { - let spaces = ParamSpace::all(); - if self.space_index < spaces.len() { - let space = spaces[self.space_index]; - let index = self.elem_index; - let item = self.vec.get(space, index); - - self.elem_index += 1; - self.adjust_space(); - - Some((space, index, item)) - } else { - None - } - } - - fn size_hint(&self) -> (usize, Option) { - let size = self.vec.as_slice().len(); - (size, Some(size)) +impl<'tcx> TypeFoldable<'tcx> for &'tcx Substs<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + let params = self.params.iter().map(|k| k.fold_with(folder)).collect(); + folder.tcx().mk_substs(Substs { + params: params + }) } -} - -impl IntoIterator for VecPerParamSpace { - type Item = T; - type IntoIter = IntoIter; - fn into_iter(self) -> IntoIter { - self.into_vec().into_iter() + fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + folder.fold_substs(self) } -} -impl<'a,T> IntoIterator for &'a VecPerParamSpace { - type Item = &'a T; - type IntoIter = Iter<'a, T>; - - fn into_iter(self) -> Iter<'a, T> { - self.as_slice().into_iter() + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.params.visit_with(visitor) } } +impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Substs<'tcx> {} /////////////////////////////////////////////////////////////////////////// // Public trait `Subst` @@ -588,16 +396,18 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for SubstFolder<'a, 'gcx, 'tcx> { t } - fn fold_region(&mut self, r: ty::Region) -> ty::Region { + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { // Note: This routine only handles regions that are bound on // type declarations and other outer declarations, not those // bound in *fn types*. Region substitution of the bound // regions that appear in a function signature is done using // the specialized routine `ty::replace_late_regions()`. - match r { + match *r { ty::ReEarlyBound(data) => { - match self.substs.regions.opt_get(data.space, data.index as usize) { - Some(&r) => { + let r = self.substs.params.get(data.index as usize) + .and_then(|k| k.as_region()); + match r { + Some(r) => { self.shift_region_through_binders(r) } None => { @@ -606,10 +416,9 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for SubstFolder<'a, 'gcx, 'tcx> { span, "Region parameter out of range \ when substituting in region {} (root type={:?}) \ - (space={:?}, index={})", + (index={})", data.name, self.root_ty, - data.space, data.index); } } @@ -652,21 +461,21 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for SubstFolder<'a, 'gcx, 'tcx> { impl<'a, 'gcx, 'tcx> SubstFolder<'a, 'gcx, 'tcx> { fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> { // Look up the type in the substitutions. It really should be in there. 
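
Illustrative sketch of what `rebase_onto` does, using plain string slices instead of interned `Kind`s (not from the patch): a child item's substitutions start with its parent's parameters, so moving the child under a different parent instantiation keeps only the child-specific tail and splices it onto the new parent's parameters. `parent_own_count` plays the role of `Generics::own_count()`.

fn rebase_onto(child: &[&'static str],
               parent_own_count: usize,
               target_parent: &[&'static str]) -> Vec<&'static str> {
    // New parent parameters first, then whatever the child added on top.
    target_parent.iter()
        .chain(&child[parent_own_count..])
        .cloned()
        .collect()
}

fn main() {
    // Trait `Foo<'a, T>` with a method-level parameter `U`:
    let method_in_trait = ["'a", "T", "U"];
    // An impl instantiates the trait's own parameters with concrete values:
    let impl_substs = ["'static", "i32"];
    assert_eq!(rebase_onto(&method_in_trait, 2, &impl_substs),
               vec!["'static", "i32", "U"]);
}
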
- let opt_ty = self.substs.types.opt_get(p.space, p.idx as usize); + let opt_ty = self.substs.params.get(p.idx as usize) + .and_then(|k| k.as_type()); let ty = match opt_ty { - Some(t) => *t, + Some(t) => t, None => { let span = self.span.unwrap_or(DUMMY_SP); span_bug!( span, - "Type parameter `{:?}` ({:?}/{:?}/{}) out of range \ + "Type parameter `{:?}` ({:?}/{}) out of range \ when substituting (root type={:?}) substs={:?}", p, source_ty, - p.space, p.idx, self.root_ty, - self.substs); + self.substs.params); } }; @@ -729,7 +538,61 @@ impl<'a, 'gcx, 'tcx> SubstFolder<'a, 'gcx, 'tcx> { result } - fn shift_region_through_binders(&self, region: ty::Region) -> ty::Region { - ty::fold::shift_region(region, self.region_binders_passed) + fn shift_region_through_binders(&self, region: &'tcx ty::Region) -> &'tcx ty::Region { + self.tcx().mk_region(ty::fold::shift_region(*region, self.region_binders_passed)) + } +} + +// Helper methods that modify substitutions. + +impl<'a, 'gcx, 'tcx> ty::TraitRef<'tcx> { + pub fn from_method(tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_id: DefId, + substs: &Substs<'tcx>) + -> ty::TraitRef<'tcx> { + let defs = tcx.lookup_generics(trait_id); + + let params = substs.params[..defs.own_count()].iter().cloned(); + ty::TraitRef { + def_id: trait_id, + substs: Substs::new(tcx, params) + } + } +} + +impl<'a, 'gcx, 'tcx> ty::ExistentialTraitRef<'tcx> { + pub fn erase_self_ty(tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_ref: ty::TraitRef<'tcx>) + -> ty::ExistentialTraitRef<'tcx> { + // Assert there is a Self. + trait_ref.substs.type_at(0); + + let params = trait_ref.substs.params[1..].iter().cloned(); + ty::ExistentialTraitRef { + def_id: trait_ref.def_id, + substs: Substs::new(tcx, params) + } + } +} + +impl<'a, 'gcx, 'tcx> ty::PolyExistentialTraitRef<'tcx> { + /// Object types don't have a self-type specified. Therefore, when + /// we convert the principal trait-ref into a normal trait-ref, + /// you must give *some* self-type. A common choice is `mk_err()` + /// or some skolemized type. + pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + self_ty: Ty<'tcx>) + -> ty::PolyTraitRef<'tcx> { + // otherwise the escaping regions would be captured by the binder + assert!(!self_ty.has_escaping_regions()); + + self.map_bound(|trait_ref| { + let params = trait_ref.substs.params.iter().cloned(); + let params = iter::once(Kind::from(self_ty)).chain(params); + ty::TraitRef { + def_id: trait_ref.def_id, + substs: Substs::new(tcx, params) + } + }) } } diff --git a/src/librustc/ty/trait_def.rs b/src/librustc/ty/trait_def.rs index a76dfc35dc..3ff2ed76e5 100644 --- a/src/librustc/ty/trait_def.rs +++ b/src/librustc/ty/trait_def.rs @@ -15,7 +15,6 @@ use ty; use ty::fast_reject; use ty::{Ty, TyCtxt, TraitRef}; use std::cell::{Cell, RefCell}; -use syntax::ast::Name; use hir; use util::nodemap::FnvHashMap; @@ -34,14 +33,10 @@ pub struct TraitDef<'tcx> { /// `Eq`, there is a single bound `Self : Eq`). This is so that /// default methods get to assume that the `Self` parameters /// implements the trait. - pub generics: ty::Generics<'tcx>, + pub generics: &'tcx ty::Generics<'tcx>, pub trait_ref: ty::TraitRef<'tcx>, - /// A list of the associated types defined in this trait. Useful - /// for resolving `X::Foo` type markers. - pub associated_type_names: Vec, - // Impls of a trait. 
To allow for quicker lookup, the impls are indexed by a // simplified version of their `Self` type: impls with a simplifiable `Self` // are stored in `nonblanket_impls` keyed by it, while all other impls are @@ -70,26 +65,30 @@ pub struct TraitDef<'tcx> { pub specialization_graph: RefCell, /// Various flags - pub flags: Cell + pub flags: Cell, + + /// The ICH of this trait's DefPath, cached here so it doesn't have to be + /// recomputed all the time. + pub def_path_hash: u64, } impl<'a, 'gcx, 'tcx> TraitDef<'tcx> { pub fn new(unsafety: hir::Unsafety, paren_sugar: bool, - generics: ty::Generics<'tcx>, + generics: &'tcx ty::Generics<'tcx>, trait_ref: ty::TraitRef<'tcx>, - associated_type_names: Vec) + def_path_hash: u64) -> TraitDef<'tcx> { TraitDef { paren_sugar: paren_sugar, unsafety: unsafety, generics: generics, trait_ref: trait_ref, - associated_type_names: associated_type_names, nonblanket_impls: RefCell::new(FnvHashMap()), blanket_impls: RefCell::new(vec![]), flags: Cell::new(ty::TraitFlags::NO_TRAIT_FLAGS), specialization_graph: RefCell::new(traits::specialization_graph::Graph::new()), + def_path_hash: def_path_hash, } } diff --git a/src/librustc/ty/util.rs b/src/librustc/ty/util.rs index d7bb8ff299..5b0f43e3cf 100644 --- a/src/librustc/ty/util.rs +++ b/src/librustc/ty/util.rs @@ -11,20 +11,23 @@ //! misc. type-system utilities too small to deserve their own file use hir::def_id::DefId; -use ty::subst; use infer::InferCtxt; +use hir::map as ast_map; use hir::pat_util; use traits::{self, Reveal}; -use ty::{self, Ty, TyCtxt, TypeAndMut, TypeFlags, TypeFoldable}; +use ty::{self, Ty, AdtKind, TyCtxt, TypeAndMut, TypeFlags, TypeFoldable}; use ty::{Disr, ParameterEnvironment}; use ty::fold::TypeVisitor; use ty::layout::{Layout, LayoutError}; use ty::TypeVariants::*; +use util::nodemap::FnvHashMap; use rustc_const_math::{ConstInt, ConstIsize, ConstUsize}; +use std::cell::RefCell; use std::cmp; -use std::hash::{Hash, SipHasher, Hasher}; +use std::hash::{Hash, Hasher}; +use std::collections::hash_map::DefaultHasher; use std::intrinsics; use syntax::ast::{self, Name}; use syntax::attr::{self, SignedInt, UnsignedInt}; @@ -139,28 +142,30 @@ impl<'tcx> ParameterEnvironment<'tcx> { // FIXME: (@jroesch) float this code up tcx.infer_ctxt(None, Some(self.clone()), Reveal::ExactMatch).enter(|infcx| { let adt = match self_type.sty { - ty::TyStruct(struct_def, substs) => { - for field in struct_def.all_fields() { - let field_ty = field.ty(tcx, substs); - if infcx.type_moves_by_default(field_ty, span) { - return Err(CopyImplementationError::InfrigingField( - field.name)) - } - } - struct_def - } - ty::TyEnum(enum_def, substs) => { - for variant in &enum_def.variants { - for field in &variant.fields { + ty::TyAdt(adt, substs) => match adt.adt_kind() { + AdtKind::Struct | AdtKind::Union => { + for field in adt.all_fields() { let field_ty = field.ty(tcx, substs); if infcx.type_moves_by_default(field_ty, span) { - return Err(CopyImplementationError::InfrigingVariant( - variant.name)) + return Err(CopyImplementationError::InfrigingField( + field.name)) } } + adt } - enum_def - } + AdtKind::Enum => { + for variant in &adt.variants { + for field in &variant.fields { + let field_ty = field.ty(tcx, substs); + if infcx.type_moves_by_default(field_ty, span) { + return Err(CopyImplementationError::InfrigingVariant( + variant.name)) + } + } + } + adt + } + }, _ => return Err(CopyImplementationError::NotAnAdt) }; @@ -184,7 +189,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn has_error_field(self, ty: 
Ty<'tcx>) -> bool { match ty.sty { - ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => { + ty::TyAdt(def, substs) => { for field in def.all_fields() { let field_ty = field.ty(self, substs); if let TyError = field_ty.sty { @@ -204,15 +209,12 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { i: usize, variant: Option) -> Option> { match (&ty.sty, variant) { - (&TyStruct(def, substs), None) => { - def.struct_variant().fields.get(i).map(|f| f.ty(self, substs)) + (&TyAdt(adt, substs), Some(vid)) => { + adt.variant_with_id(vid).fields.get(i).map(|f| f.ty(self, substs)) } - (&TyEnum(def, substs), Some(vid)) => { - def.variant_with_id(vid).fields.get(i).map(|f| f.ty(self, substs)) - } - (&TyEnum(def, substs), None) => { - assert!(def.is_univariant()); - def.variants[0].fields.get(i).map(|f| f.ty(self, substs)) + (&TyAdt(adt, substs), None) => { + // Don't use `struct_variant`, this may be a univariant enum. + adt.variants[0].fields.get(i).map(|f| f.ty(self, substs)) } (&TyTuple(ref v), None) => v.get(i).cloned(), _ => None @@ -226,11 +228,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { n: Name, variant: Option) -> Option> { match (&ty.sty, variant) { - (&TyStruct(def, substs), None) => { - def.struct_variant().find_field_named(n).map(|f| f.ty(self, substs)) + (&TyAdt(adt, substs), Some(vid)) => { + adt.variant_with_id(vid).find_field_named(n).map(|f| f.ty(self, substs)) } - (&TyEnum(def, substs), Some(vid)) => { - def.variant_with_id(vid).find_field_named(n).map(|f| f.ty(self, substs)) + (&TyAdt(adt, substs), None) => { + adt.struct_variant().find_field_named(n).map(|f| f.ty(self, substs)) } _ => return None } @@ -242,7 +244,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn enum_repr_type(self, opt_hint: Option<&attr::ReprAttr>) -> attr::IntType { match opt_hint { // Feed in the given type - Some(&attr::ReprInt(_, int_t)) => int_t, + Some(&attr::ReprInt(int_t)) => int_t, // ... but provide sensible default if none provided // // NB. Historically `fn enum_variants` generate i64 here, while @@ -255,7 +257,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// if not a structure at all. Corresponds to the only possible unsized /// field, and its type can be used to determine unsizing strategy. 
pub fn struct_tail(self, mut ty: Ty<'tcx>) -> Ty<'tcx> { - while let TyStruct(def, substs) = ty.sty { + while let TyAdt(def, substs) = ty.sty { + if !def.is_struct() { + break + } match def.struct_variant().fields.last() { Some(f) => ty = f.ty(self, substs), None => break @@ -274,15 +279,16 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { target: Ty<'tcx>) -> (Ty<'tcx>, Ty<'tcx>) { let (mut a, mut b) = (source, target); - while let (&TyStruct(a_def, a_substs), &TyStruct(b_def, b_substs)) = (&a.sty, &b.sty) { - if a_def != b_def { - break; + while let (&TyAdt(a_def, a_substs), &TyAdt(b_def, b_substs)) = (&a.sty, &b.sty) { + if a_def != b_def || !a_def.is_struct() { + break } - if let Some(f) = a_def.struct_variant().fields.last() { - a = f.ty(self, a_substs); - b = f.ty(self, b_substs); - } else { - break; + match a_def.struct_variant().fields.last() { + Some(f) => { + a = f.ty(self, a_substs); + b = f.ty(self, b_substs); + } + _ => break } } (a, b) @@ -307,7 +313,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn required_region_bounds(self, erased_self_ty: Ty<'tcx>, predicates: Vec>) - -> Vec { + -> Vec<&'tcx ty::Region> { debug!("required_region_bounds(erased_self_ty={:?}, predicates={:?})", erased_self_ty, predicates); @@ -319,7 +325,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { match predicate { ty::Predicate::Projection(..) | ty::Predicate::Trait(..) | - ty::Predicate::Rfc1592(..) | ty::Predicate::Equate(..) | ty::Predicate::WellFormed(..) | ty::Predicate::ObjectSafe(..) | @@ -351,12 +356,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// Creates a hash of the type `Ty` which will be the same no matter what crate /// context it's calculated within. This is used by the `type_id` intrinsic. pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 { - let mut hasher = TypeIdHasher { - tcx: self, - state: SipHasher::new() - }; + let mut hasher = TypeIdHasher::new(self, DefaultHasher::default()); hasher.visit_ty(ty); - hasher.state.finish() + hasher.finish() } /// Returns true if this ADT is a dtorck type. @@ -390,16 +392,88 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } -struct TypeIdHasher<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { +// When hashing a type this ends up affecting properties like symbol names. We +// want these symbol names to be calculated independent of other factors like +// what architecture you're compiling *from*. +// +// The hashing just uses the standard `Hash` trait, but the implementations of +// `Hash` for the `usize` and `isize` types are *not* architecture independent +// (e.g. they has 4 or 8 bytes). As a result we want to avoid `usize` and +// `isize` completely when hashing. To ensure that these don't leak in we use a +// custom hasher implementation here which inflates the size of these to a `u64` +// and `i64`. 
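
Illustrative sketch of the widening idea in isolation, not from the patch: feeding a `usize` to the hasher as a fixed-width `u64` makes the resulting hash independent of the host's word size, which is what `WidenUsizeHasher` below does transparently for every `write_usize`/`write_isize` call.

use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;

fn portable_hash_of_len(len: usize) -> u64 {
    let mut h = DefaultHasher::default();
    // Widen before hashing instead of calling `write_usize`, which would feed
    // 4 bytes on a 32-bit host but 8 bytes on a 64-bit one.
    h.write_u64(len as u64);
    h.finish()
}

fn main() {
    println!("{:x}", portable_hash_of_len(3));
}
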
+struct WidenUsizeHasher { + inner: H, +} + +impl WidenUsizeHasher { + fn new(inner: H) -> WidenUsizeHasher { + WidenUsizeHasher { inner: inner } + } +} + +impl Hasher for WidenUsizeHasher { + fn write(&mut self, bytes: &[u8]) { + self.inner.write(bytes) + } + + fn finish(&self) -> u64 { + self.inner.finish() + } + + fn write_u8(&mut self, i: u8) { + self.inner.write_u8(i) + } + fn write_u16(&mut self, i: u16) { + self.inner.write_u16(i) + } + fn write_u32(&mut self, i: u32) { + self.inner.write_u32(i) + } + fn write_u64(&mut self, i: u64) { + self.inner.write_u64(i) + } + fn write_usize(&mut self, i: usize) { + self.inner.write_u64(i as u64) + } + fn write_i8(&mut self, i: i8) { + self.inner.write_i8(i) + } + fn write_i16(&mut self, i: i16) { + self.inner.write_i16(i) + } + fn write_i32(&mut self, i: i32) { + self.inner.write_i32(i) + } + fn write_i64(&mut self, i: i64) { + self.inner.write_i64(i) + } + fn write_isize(&mut self, i: isize) { + self.inner.write_i64(i as i64) + } +} + +pub struct TypeIdHasher<'a, 'gcx: 'a+'tcx, 'tcx: 'a, H> { tcx: TyCtxt<'a, 'gcx, 'tcx>, - state: SipHasher + state: WidenUsizeHasher, } -impl<'a, 'gcx, 'tcx> TypeIdHasher<'a, 'gcx, 'tcx> { - fn hash(&mut self, x: T) { +impl<'a, 'gcx, 'tcx, H: Hasher> TypeIdHasher<'a, 'gcx, 'tcx, H> { + pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, state: H) -> Self { + TypeIdHasher { + tcx: tcx, + state: WidenUsizeHasher::new(state), + } + } + + pub fn hash(&mut self, x: T) { x.hash(&mut self.state); } + pub fn finish(self) -> u64 { + self.state.finish() + } + fn hash_discriminant_u8(&mut self, x: &T) { let v = unsafe { intrinsics::discriminant_value(x) @@ -410,19 +484,18 @@ impl<'a, 'gcx, 'tcx> TypeIdHasher<'a, 'gcx, 'tcx> { } fn def_id(&mut self, did: DefId) { - // Hash the crate identification information. - let name = self.tcx.crate_name(did.krate); - let disambiguator = self.tcx.crate_disambiguator(did.krate); - self.hash((name, disambiguator)); + // Hash the DefPath corresponding to the DefId, which is independent + // of compiler internal state. + let path = self.tcx.def_path(did); + self.def_path(&path) + } - // Hash the item path within that crate. - // FIXME(#35379) This should use a deterministic - // DefPath hashing mechanism, not the DefIndex. - self.hash(did.index); + pub fn def_path(&mut self, def_path: &ast_map::DefPath) { + def_path.deterministic_hash_to(self.tcx, &mut self.state); } } -impl<'a, 'gcx, 'tcx> TypeVisitor<'tcx> for TypeIdHasher<'a, 'gcx, 'tcx> { +impl<'a, 'gcx, 'tcx, H: Hasher> TypeVisitor<'tcx> for TypeIdHasher<'a, 'gcx, 'tcx, H> { fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool { // Distinguish between the Ty variants uniformly. self.hash_discriminant_u8(&ty.sty); @@ -431,53 +504,27 @@ impl<'a, 'gcx, 'tcx> TypeVisitor<'tcx> for TypeIdHasher<'a, 'gcx, 'tcx> { TyInt(i) => self.hash(i), TyUint(u) => self.hash(u), TyFloat(f) => self.hash(f), - TyStruct(d, _) | - TyEnum(d, _) => self.def_id(d.did), TyArray(_, n) => self.hash(n), TyRawPtr(m) | TyRef(_, m) => self.hash(m.mutbl), TyClosure(def_id, _) | TyAnon(def_id, _) | - TyFnDef(def_id, _, _) => self.def_id(def_id), + TyFnDef(def_id, ..) 
=> self.def_id(def_id), + TyAdt(d, _) => self.def_id(d.did), TyFnPtr(f) => { self.hash(f.unsafety); self.hash(f.abi); self.hash(f.sig.variadic()); + self.hash(f.sig.inputs().skip_binder().len()); } TyTrait(ref data) => { - // Trait objects have a list of projection bounds - // that are not guaranteed to be sorted in an order - // that gets preserved across crates, so we need - // to sort them again by the name, in string form. - - // Hash the whole principal trait ref. - self.def_id(data.principal_def_id()); - data.principal.visit_with(self); - - // Hash region and builtin bounds. - data.bounds.region_bound.visit_with(self); - self.hash(data.bounds.builtin_bounds); - - // Only projection bounds are left, sort and hash them. - let mut projection_bounds: Vec<_> = data.bounds.projection_bounds - .iter() - .map(|b| (b.item_name().as_str(), b)) - .collect(); - projection_bounds.sort_by_key(|&(ref name, _)| name.clone()); - for (name, bound) in projection_bounds { - self.def_id(bound.0.projection_ty.trait_ref.def_id); - self.hash(name); - bound.visit_with(self); - } - - // Bypass super_visit_with, we've visited everything. - return false; + self.def_id(data.principal.def_id()); + self.hash(data.builtin_bounds); } TyTuple(tys) => { self.hash(tys.len()); } TyParam(p) => { - self.hash(p.space); self.hash(p.idx); self.hash(p.name.as_str()); } @@ -490,17 +537,18 @@ impl<'a, 'gcx, 'tcx> TypeVisitor<'tcx> for TypeIdHasher<'a, 'gcx, 'tcx> { TyChar | TyStr | TyBox(_) | - TySlice(_) | - TyError => {} - TyInfer(_) => bug!() + TySlice(_) => {} + + TyError | + TyInfer(_) => bug!("TypeIdHasher: unexpected type {}", ty) } ty.super_visit_with(self) } - fn visit_region(&mut self, r: ty::Region) -> bool { - match r { - ty::ReStatic | ty::ReErased => { + fn visit_region(&mut self, r: &'tcx ty::Region) -> bool { + match *r { + ty::ReErased => { self.hash::(0); } ty::ReLateBound(db, ty::BrAnon(i)) => { @@ -508,6 +556,7 @@ impl<'a, 'gcx, 'tcx> TypeVisitor<'tcx> for TypeIdHasher<'a, 'gcx, 'tcx> { self.hash::(db.depth); self.hash(i); } + ty::ReStatic | ty::ReEmpty | ty::ReEarlyBound(..) | ty::ReLateBound(..) | @@ -515,7 +564,7 @@ impl<'a, 'gcx, 'tcx> TypeVisitor<'tcx> for TypeIdHasher<'a, 'gcx, 'tcx> { ty::ReScope(..) | ty::ReVar(..) | ty::ReSkolemized(..) => { - bug!("unexpected region found when hashing a type") + bug!("TypeIdHasher: unexpected region {:?}", r) } } false @@ -532,11 +581,24 @@ impl<'a, 'gcx, 'tcx> TypeVisitor<'tcx> for TypeIdHasher<'a, 'gcx, 'tcx> { impl<'a, 'tcx> ty::TyS<'tcx> { fn impls_bound(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: &ParameterEnvironment<'tcx>, - bound: ty::BuiltinBound, span: Span) -> bool + bound: ty::BuiltinBound, + cache: &RefCell, bool>>, + span: Span) -> bool { - tcx.infer_ctxt(None, Some(param_env.clone()), Reveal::ExactMatch).enter(|infcx| { - traits::type_known_to_meet_builtin_bound(&infcx, self, bound, span) - }) + if self.has_param_types() || self.has_self_ty() { + if let Some(result) = cache.borrow().get(self) { + return *result; + } + } + let result = + tcx.infer_ctxt(None, Some(param_env.clone()), Reveal::ExactMatch) + .enter(|infcx| { + traits::type_known_to_meet_builtin_bound(&infcx, self, bound, span) + }); + if self.has_param_types() || self.has_self_ty() { + cache.borrow_mut().insert(self, result); + } + return result; } // FIXME (@jroesch): I made this public to use it, not sure if should be private @@ -560,10 +622,12 @@ impl<'a, 'tcx> ty::TyS<'tcx> { mutbl: hir::MutMutable, .. }) => Some(true), - TyArray(..) | TySlice(_) | TyTrait(..) | TyTuple(..) 
| - TyClosure(..) | TyEnum(..) | TyStruct(..) | TyAnon(..) | + TyArray(..) | TySlice(..) | TyTrait(..) | TyTuple(..) | + TyClosure(..) | TyAdt(..) | TyAnon(..) | TyProjection(..) | TyParam(..) | TyInfer(..) | TyError => None - }.unwrap_or_else(|| !self.impls_bound(tcx, param_env, ty::BoundCopy, span)); + }.unwrap_or_else(|| { + !self.impls_bound(tcx, param_env, ty::BoundCopy, ¶m_env.is_copy_cache, span) + }); if !self.has_param_types() && !self.has_self_ty() { self.flags.set(self.flags.get() | if result { @@ -601,9 +665,11 @@ impl<'a, 'tcx> ty::TyS<'tcx> { TyStr | TyTrait(..) | TySlice(_) => Some(false), - TyEnum(..) | TyStruct(..) | TyProjection(..) | TyParam(..) | + TyAdt(..) | TyProjection(..) | TyParam(..) | TyInfer(..) | TyAnon(..) | TyError => None - }.unwrap_or_else(|| self.impls_bound(tcx, param_env, ty::BoundSized, span)); + }.unwrap_or_else(|| { + self.impls_bound(tcx, param_env, ty::BoundSized, ¶m_env.is_sized_cache, span) + }); if !self.has_param_types() && !self.has_self_ty() { self.flags.set(self.flags.get() | if result { @@ -627,10 +693,19 @@ impl<'a, 'tcx> ty::TyS<'tcx> { } } + let rec_limit = tcx.sess.recursion_limit.get(); + let depth = tcx.layout_depth.get(); + if depth > rec_limit { + tcx.sess.fatal( + &format!("overflow representing the type `{}`", self)); + } + + tcx.layout_depth.set(depth+1); let layout = Layout::compute_uncached(self, infcx)?; if can_cache { tcx.layout_cache.borrow_mut().insert(self, layout); } + tcx.layout_depth.set(depth); Ok(layout) } @@ -663,7 +738,7 @@ impl<'a, 'tcx> ty::TyS<'tcx> { TyArray(ty, _) => { is_type_structurally_recursive(tcx, sp, seen, ty) } - TyStruct(def, substs) | TyEnum(def, substs) => { + TyAdt(def, substs) => { find_nonrepresentable(tcx, sp, seen, @@ -680,7 +755,7 @@ impl<'a, 'tcx> ty::TyS<'tcx> { fn same_struct_or_enum<'tcx>(ty: Ty<'tcx>, def: ty::AdtDef<'tcx>) -> bool { match ty.sty { - TyStruct(ty_def, _) | TyEnum(ty_def, _) => { + TyAdt(ty_def, _) => { ty_def == def } _ => false @@ -689,18 +764,12 @@ impl<'a, 'tcx> ty::TyS<'tcx> { fn same_type<'tcx>(a: Ty<'tcx>, b: Ty<'tcx>) -> bool { match (&a.sty, &b.sty) { - (&TyStruct(did_a, ref substs_a), &TyStruct(did_b, ref substs_b)) | - (&TyEnum(did_a, ref substs_a), &TyEnum(did_b, ref substs_b)) => { + (&TyAdt(did_a, substs_a), &TyAdt(did_b, substs_b)) => { if did_a != did_b { return false; } - let types_a = substs_a.types.get_slice(subst::TypeSpace); - let types_b = substs_b.types.get_slice(subst::TypeSpace); - - let mut pairs = types_a.iter().zip(types_b); - - pairs.all(|(&a, &b)| same_type(a, b)) + substs_a.types().zip(substs_b.types()).all(|(a, b)| same_type(a, b)) } _ => { a == b @@ -717,7 +786,7 @@ impl<'a, 'tcx> ty::TyS<'tcx> { debug!("is_type_structurally_recursive: {:?}", ty); match ty.sty { - TyStruct(def, _) | TyEnum(def, _) => { + TyAdt(def, _) => { { // Iterate through stack of previously seen types. let mut iter = seen.iter(); diff --git a/src/librustc/ty/walk.rs b/src/librustc/ty/walk.rs index 9c1f9d9537..dd3a62f7cd 100644 --- a/src/librustc/ty/walk.rs +++ b/src/librustc/ty/walk.rs @@ -67,6 +67,12 @@ pub fn walk_shallow<'tcx>(ty: Ty<'tcx>) -> IntoIter> { stack.into_iter() } +// We push types on the stack in reverse order so as to +// maintain a pre-order traversal. As of the time of this +// writing, the fact that the traversal is pre-order is not +// known to be significant to any code, but it seems like the +// natural order one would expect (basically, the order of the +// types as they are written). 
fn push_subtypes<'tcx>(stack: &mut Vec>, parent_ty: Ty<'tcx>) { match parent_ty.sty { ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | @@ -79,28 +85,26 @@ fn push_subtypes<'tcx>(stack: &mut Vec>, parent_ty: Ty<'tcx>) { stack.push(mt.ty); } ty::TyProjection(ref data) => { - push_reversed(stack, data.trait_ref.substs.types.as_slice()); + stack.extend(data.trait_ref.substs.types().rev()); } - ty::TyTrait(box ty::TraitTy { ref principal, ref bounds }) => { - push_reversed(stack, principal.substs().types.as_slice()); - push_reversed(stack, &bounds.projection_bounds.iter().map(|pred| { + ty::TyTrait(ref obj) => { + stack.extend(obj.principal.input_types().rev()); + stack.extend(obj.projection_bounds.iter().map(|pred| { pred.0.ty - }).collect::>()); + }).rev()); } - ty::TyEnum(_, ref substs) | - ty::TyStruct(_, ref substs) | - ty::TyAnon(_, ref substs) => { - push_reversed(stack, substs.types.as_slice()); + ty::TyAdt(_, substs) | ty::TyAnon(_, substs) => { + stack.extend(substs.types().rev()); } ty::TyClosure(_, ref substs) => { - push_reversed(stack, substs.func_substs.types.as_slice()); - push_reversed(stack, &substs.upvar_tys); + stack.extend(substs.func_substs.types().rev()); + stack.extend(substs.upvar_tys.iter().cloned().rev()); } - ty::TyTuple(ref ts) => { - push_reversed(stack, ts); + ty::TyTuple(ts) => { + stack.extend(ts.iter().cloned().rev()); } ty::TyFnDef(_, substs, ref ft) => { - push_reversed(stack, substs.types.as_slice()); + stack.extend(substs.types().rev()); push_sig_subtypes(stack, &ft.sig); } ty::TyFnPtr(ref ft) => { @@ -111,17 +115,5 @@ fn push_subtypes<'tcx>(stack: &mut Vec>, parent_ty: Ty<'tcx>) { fn push_sig_subtypes<'tcx>(stack: &mut Vec>, sig: &ty::PolyFnSig<'tcx>) { stack.push(sig.0.output); - push_reversed(stack, &sig.0.inputs); -} - -fn push_reversed<'tcx>(stack: &mut Vec>, tys: &[Ty<'tcx>]) { - // We push slices on the stack in reverse order so as to - // maintain a pre-order traversal. As of the time of this - // writing, the fact that the traversal is pre-order is not - // known to be significant to any code, but it seems like the - // natural order one would expect (basically, the order of the - // types as they are written). - for &ty in tys.iter().rev() { - stack.push(ty); - } + stack.extend(sig.0.inputs.iter().cloned().rev()); } diff --git a/src/librustc/ty/wf.rs b/src/librustc/ty/wf.rs index bfc2e11d9f..0557660e98 100644 --- a/src/librustc/ty/wf.rs +++ b/src/librustc/ty/wf.rs @@ -94,9 +94,6 @@ pub fn predicate_obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, } ty::Predicate::ClosureKind(..) => { } - ty::Predicate::Rfc1592(ref data) => { - bug!("RFC1592 predicate `{:?}` in predicate_obligations", data); - } } wf.normalize() @@ -115,9 +112,9 @@ pub fn predicate_obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, /// For `&'a T` to be WF, `T: 'a` must hold. So we can assume `T: 'a`. #[derive(Debug)] pub enum ImpliedBound<'tcx> { - RegionSubRegion(ty::Region, ty::Region), - RegionSubParam(ty::Region, ty::ParamTy), - RegionSubProjection(ty::Region, ty::ProjectionTy<'tcx>), + RegionSubRegion(&'tcx ty::Region, &'tcx ty::Region), + RegionSubParam(&'tcx ty::Region, ty::ParamTy), + RegionSubProjection(&'tcx ty::Region, ty::ProjectionTy<'tcx>), } /// Compute the implied bounds that a callee/impl can assume based on @@ -158,7 +155,6 @@ pub fn implied_bounds<'a, 'gcx, 'tcx>( assert!(!obligation.has_escaping_regions()); match obligation.predicate { ty::Predicate::Trait(..) | - ty::Predicate::Rfc1592(..) 
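
Illustrative sketch of the reverse-push trick, not from the patch: with a `Vec` used as a stack, the last push is popped first, so pushing children in reverse order restores the left-to-right pre-order visit that the comment above describes.

struct Tree {
    value: u32,
    children: Vec<Tree>,
}

fn preorder(root: &Tree) -> Vec<u32> {
    let mut out = Vec::new();
    let mut stack = vec![root];
    while let Some(node) = stack.pop() {
        out.push(node.value);
        // Push in reverse so the first child is popped (and visited) first.
        stack.extend(node.children.iter().rev());
    }
    out
}

fn main() {
    let t = Tree {
        value: 1,
        children: vec![
            Tree { value: 2, children: vec![] },
            Tree { value: 3, children: vec![] },
        ],
    };
    assert_eq!(preorder(&t), [1, 2, 3]);
}
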
| ty::Predicate::Equate(..) | ty::Predicate::Projection(..) | ty::Predicate::ClosureKind(..) | @@ -196,7 +192,7 @@ pub fn implied_bounds<'a, 'gcx, 'tcx>( /// this down to determine what relationships would have to hold for /// `T: 'a` to hold. We get to assume that the caller has validated /// those relationships. -fn implied_bounds_from_components<'tcx>(sub_region: ty::Region, +fn implied_bounds_from_components<'tcx>(sub_region: &'tcx ty::Region, sup_components: Vec>) -> Vec> { @@ -260,9 +256,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { let cause = self.cause(traits::MiscObligation); self.out.extend( - trait_ref.substs.types - .as_slice() - .iter() + trait_ref.substs.types() .filter(|ty| !ty.has_escaping_regions()) .map(|ty| traits::Obligation::new(cause.clone(), ty::Predicate::WellFormed(ty)))); @@ -284,21 +278,14 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { } } - fn require_sized(&mut self, subty: Ty<'tcx>, cause: traits::ObligationCauseCode<'tcx>, - rfc1592: bool) { + fn require_sized(&mut self, subty: Ty<'tcx>, cause: traits::ObligationCauseCode<'tcx>) { if !subty.has_escaping_regions() { let cause = self.cause(cause); match self.infcx.tcx.trait_ref_for_builtin_bound(ty::BoundSized, subty) { Ok(trait_ref) => { - let predicate = trait_ref.to_predicate(); - let predicate = if rfc1592 { - ty::Predicate::Rfc1592(box predicate) - } else { - predicate - }; self.out.push( traits::Obligation::new(cause, - predicate)); + trait_ref.to_predicate())); } Err(ErrorReported) => { } } @@ -328,13 +315,13 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { ty::TySlice(subty) | ty::TyArray(subty, _) => { - self.require_sized(subty, traits::SliceOrArrayElem, false); + self.require_sized(subty, traits::SliceOrArrayElem); } ty::TyTuple(ref tys) => { if let Some((_last, rest)) = tys.split_last() { for elem in rest { - self.require_sized(elem, traits::TupleElem, true); + self.require_sized(elem, traits::TupleElem); } } } @@ -349,8 +336,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { self.compute_projection(data); } - ty::TyEnum(def, substs) | - ty::TyStruct(def, substs) => { + ty::TyAdt(def, substs) => { // WfNominalType let obligations = self.nominal_obligations(def.did, substs); self.out.extend(obligations); @@ -365,7 +351,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { cause, ty::Predicate::TypeOutlives( ty::Binder( - ty::OutlivesPredicate(mt.ty, *r))))); + ty::OutlivesPredicate(mt.ty, r))))); } } @@ -403,22 +389,15 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { let cause = self.cause(traits::MiscObligation); - // FIXME(#33243): remove RFC1592 - self.out.push(traits::Obligation::new( - cause.clone(), - ty::Predicate::ObjectSafe(data.principal_def_id()) - )); let component_traits = - data.bounds.builtin_bounds.iter().flat_map(|bound| { + data.builtin_bounds.iter().flat_map(|bound| { tcx.lang_items.from_builtin_kind(bound).ok() - }); -// .chain(Some(data.principal_def_id())); + }) + .chain(Some(data.principal.def_id())); self.out.extend( component_traits.map(|did| { traits::Obligation::new( cause.clone(), - ty::Predicate::Rfc1592( - box ty::Predicate::ObjectSafe(did) - ) + ty::Predicate::ObjectSafe(did) )}) ); } @@ -476,7 +455,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { .collect() } - fn from_object_ty(&mut self, ty: Ty<'tcx>, data: &ty::TraitTy<'tcx>) { + fn from_object_ty(&mut self, ty: Ty<'tcx>, data: &ty::TraitObject<'tcx>) { // Imagine a type like this: // // trait Foo { } @@ -512,10 +491,10 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 
'tcx> { if !data.has_escaping_regions() { let implicit_bounds = object_region_bounds(self.infcx.tcx, - &data.principal, - data.bounds.builtin_bounds); + data.principal, + data.builtin_bounds); - let explicit_bound = data.bounds.region_bound; + let explicit_bound = data.region_bound; for implicit_bound in implicit_bounds { let cause = self.cause(traits::ReferenceOutlivesReferent(ty)); @@ -534,22 +513,17 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { /// `ty::required_region_bounds`, see that for more information. pub fn object_region_bounds<'a, 'gcx, 'tcx>( tcx: TyCtxt<'a, 'gcx, 'tcx>, - principal: &ty::PolyTraitRef<'tcx>, + principal: ty::PolyExistentialTraitRef<'tcx>, others: ty::BuiltinBounds) - -> Vec + -> Vec<&'tcx ty::Region> { // Since we don't actually *know* the self type for an object, // this "open(err)" serves as a kind of dummy standin -- basically // a skolemized type. let open_ty = tcx.mk_infer(ty::FreshTy(0)); - // Note that we preserve the overall binding levels here. - assert!(!open_ty.has_escaping_regions()); - let substs = tcx.mk_substs(principal.0.substs.with_self_ty(open_ty)); - let trait_refs = vec!(ty::Binder(ty::TraitRef::new(principal.0.def_id, substs))); - let mut predicates = others.to_predicates(tcx, open_ty); - predicates.extend(trait_refs.iter().map(|t| t.to_predicate())); + predicates.push(principal.with_self_ty(tcx, open_ty).to_predicate()); tcx.required_region_bounds(open_ty, predicates) } diff --git a/src/librustc/util/common.rs b/src/librustc/util/common.rs index bdfb97549d..78f20b77f3 100644 --- a/src/librustc/util/common.rs +++ b/src/librustc/util/common.rs @@ -17,7 +17,7 @@ use std::fmt::Debug; use std::hash::{Hash, BuildHasher}; use std::iter::repeat; use std::path::Path; -use std::time::Instant; +use std::time::{Duration, Instant}; use hir; use hir::intravisit; @@ -47,12 +47,6 @@ pub fn time(do_it: bool, what: &str, f: F) -> T where let rv = f(); let dur = start.elapsed(); - // Hack up our own formatting for the duration to make it easier for scripts - // to parse (always use the same number of decimal places and the same unit). - const NANOS_PER_SEC: f64 = 1_000_000_000.0; - let secs = dur.as_secs() as f64; - let secs = secs + dur.subsec_nanos() as f64 / NANOS_PER_SEC; - let mem_string = match get_resident() { Some(n) => { let mb = n as f64 / 1_000_000.0; @@ -60,14 +54,37 @@ pub fn time(do_it: bool, what: &str, f: F) -> T where } None => "".to_owned(), }; - println!("{}time: {:.3}{}\t{}", repeat(" ").take(old).collect::(), - secs, mem_string, what); + println!("{}time: {}{}\t{}", + repeat(" ").take(old).collect::(), + duration_to_secs_str(dur), + mem_string, + what); DEPTH.with(|slot| slot.set(old)); rv } +// Hack up our own formatting for the duration to make it easier for scripts +// to parse (always use the same number of decimal places and the same unit). +pub fn duration_to_secs_str(dur: Duration) -> String { + const NANOS_PER_SEC: f64 = 1_000_000_000.0; + let secs = dur.as_secs() as f64 + + dur.subsec_nanos() as f64 / NANOS_PER_SEC; + + format!("{:.3}", secs) +} + +pub fn record_time(accu: &Cell, f: F) -> T where + F: FnOnce() -> T, +{ + let start = Instant::now(); + let rv = f(); + let duration = start.elapsed(); + accu.set(duration + accu.get()); + rv +} + // Like std::macros::try!, but for Option<>. macro_rules! 
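
Illustrative usage sketch of the two timing helpers introduced above; the helpers are restated so the snippet is self-contained, and the workload is made up.

use std::cell::Cell;
use std::time::{Duration, Instant};

// Fixed three-decimal seconds, easy for scripts to parse.
fn duration_to_secs_str(dur: Duration) -> String {
    const NANOS_PER_SEC: f64 = 1_000_000_000.0;
    let secs = dur.as_secs() as f64 + dur.subsec_nanos() as f64 / NANOS_PER_SEC;
    format!("{:.3}", secs)
}

// Run `f`, add its elapsed time to the accumulator, and pass the result through.
fn record_time<T, F: FnOnce() -> T>(accu: &Cell<Duration>, f: F) -> T {
    let start = Instant::now();
    let rv = f();
    accu.set(accu.get() + start.elapsed());
    rv
}

fn main() {
    let total = Cell::new(Duration::new(0, 0));
    let sum: u64 = record_time(&total, || (0..1_000_000u64).sum());
    println!("summed to {} in {}s", sum, duration_to_secs_str(total.get()));
}
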
option_try( ($e:expr) => (match $e { Some(e) => e, None => return None }) diff --git a/src/librustc/util/fs.rs b/src/librustc/util/fs.rs index f4e1c06090..c290d8f893 100644 --- a/src/librustc/util/fs.rs +++ b/src/librustc/util/fs.rs @@ -56,14 +56,49 @@ pub fn fix_windows_verbatim_for_gcc(p: &Path) -> PathBuf { } } +pub enum LinkOrCopy { + Link, + Copy +} + /// Copy `p` into `q`, preferring to use hard-linking if possible. If /// `q` already exists, it is removed first. -pub fn link_or_copy, Q: AsRef>(p: P, q: Q) -> io::Result<()> { +/// The result indicates which of the two operations has been performed. +pub fn link_or_copy, Q: AsRef>(p: P, q: Q) -> io::Result { let p = p.as_ref(); let q = q.as_ref(); if q.exists() { - try!(fs::remove_file(&q)); + fs::remove_file(&q)?; + } + + match fs::hard_link(p, q) { + Ok(()) => Ok(LinkOrCopy::Link), + Err(_) => { + match fs::copy(p, q) { + Ok(_) => Ok(LinkOrCopy::Copy), + Err(e) => Err(e) + } + } + } +} + +// Like std::fs::create_dir_all, except handles concurrent calls among multiple +// threads or processes. +pub fn create_dir_racy(path: &Path) -> io::Result<()> { + match fs::create_dir(path) { + Ok(()) => return Ok(()), + Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => return Ok(()), + Err(ref e) if e.kind() == io::ErrorKind::NotFound => {} + Err(e) => return Err(e), + } + match path.parent() { + Some(p) => try!(create_dir_racy(p)), + None => return Err(io::Error::new(io::ErrorKind::Other, + "failed to create whole tree")), + } + match fs::create_dir(path) { + Ok(()) => Ok(()), + Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok(()), + Err(e) => Err(e), } - fs::hard_link(p, q) - .or_else(|_| fs::copy(p, q).map(|_| ())) } diff --git a/src/librustc/util/ppaux.rs b/src/librustc/util/ppaux.rs index 896ef49de6..312cab2454 100644 --- a/src/librustc/util/ppaux.rs +++ b/src/librustc/util/ppaux.rs @@ -8,11 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. - use hir::def_id::DefId; -use ty::subst::{self, Subst}; +use ty::subst::{self, Subst, Substs}; +use hir::map::definitions::DefPathData; use ty::{BrAnon, BrEnv, BrFresh, BrNamed}; -use ty::{TyBool, TyChar, TyStruct, TyEnum}; +use ty::{TyBool, TyChar, TyAdt}; use ty::{TyError, TyStr, TyArray, TySlice, TyFloat, TyFnDef, TyFnPtr}; use ty::{TyParam, TyRawPtr, TyRef, TyNever, TyTuple}; use ty::TyClosure; @@ -22,6 +22,8 @@ use ty::fold::{TypeFolder, TypeVisitor}; use std::cell::Cell; use std::fmt; +use std::usize; + use syntax::abi::Abi; use syntax::parse::token; use syntax::ast::CRATE_NODE_ID; @@ -55,79 +57,107 @@ fn fn_sig(f: &mut fmt::Formatter, Ok(()) } -/// Namespace of the path given to parameterized to print. 
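The `link_or_copy` and `create_dir_racy` helpers added to `src/librustc/util/fs.rs` above follow the same pattern: try the cheap operation first, fall back to the general one, and tolerate benign races. A simplified standalone sketch of those helpers, taking plain `&Path` arguments instead of the generic `AsRef<Path>` parameters (illustrative only, not the exact rustc code):

```rust
use std::fs;
use std::io;
use std::path::Path;

/// Which operation `link_or_copy` ended up performing.
pub enum LinkOrCopy {
    Link,
    Copy,
}

/// Prefer a hard link; fall back to a byte-for-byte copy if linking fails
/// (for example across filesystems). Any existing target is removed first.
pub fn link_or_copy(from: &Path, to: &Path) -> io::Result<LinkOrCopy> {
    if to.exists() {
        fs::remove_file(to)?;
    }
    match fs::hard_link(from, to) {
        Ok(()) => Ok(LinkOrCopy::Link),
        Err(_) => fs::copy(from, to).map(|_| LinkOrCopy::Copy),
    }
}

/// Like `fs::create_dir_all`, but safe to call concurrently from several
/// threads or processes: `AlreadyExists` from a racing creator counts as success.
pub fn create_dir_racy(path: &Path) -> io::Result<()> {
    match fs::create_dir(path) {
        Ok(()) => return Ok(()),
        Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => return Ok(()),
        Err(ref e) if e.kind() == io::ErrorKind::NotFound => {}
        Err(e) => return Err(e),
    }
    // The parent is missing: create it first, then retry the leaf once.
    match path.parent() {
        Some(parent) => create_dir_racy(parent)?,
        None => {
            return Err(io::Error::new(io::ErrorKind::Other,
                                      "failed to create whole tree"));
        }
    }
    match fs::create_dir(path) {
        Ok(()) => Ok(()),
        Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok(()),
        Err(e) => Err(e),
    }
}
```

Callers can branch on the returned `LinkOrCopy` whenever the difference between sharing an inode and making a fresh copy matters.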
-#[derive(Copy, Clone, PartialEq)] -pub enum Ns { - Type, - Value -} +pub fn parameterized(f: &mut fmt::Formatter, + substs: &subst::Substs, + mut did: DefId, + projections: &[ty::ProjectionPredicate]) + -> fmt::Result { + let key = ty::tls::with(|tcx| tcx.def_key(did)); + let mut item_name = if let Some(name) = key.disambiguated_data.data.get_opt_name() { + Some(name) + } else { + did.index = key.parent.unwrap_or_else( + || bug!("finding type for {:?}, encountered def-id {:?} with no parent", + did, did)); + parameterized(f, substs, did, projections)?; + return write!(f, "::{}", key.disambiguated_data.data.as_interned_str()); + }; -fn number_of_supplied_defaults<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, - substs: &subst::Substs, - space: subst::ParamSpace, - generics: ty::Generics<'tcx>) - -> usize -{ - let has_self = substs.self_ty().is_some(); - let ty_params = generics.types.get_slice(space); - let tps = substs.types.get_slice(space); - if ty_params.last().map_or(false, |def| def.default.is_some()) { - let substs = tcx.lift(&substs); - ty_params.iter().zip(tps).rev().take_while(|&(def, &actual)| { - match def.default { - Some(default) => { - if !has_self && default.has_self_ty() { - // In an object type, there is no `Self`, and - // thus if the default value references Self, - // the user will be required to give an - // explicit value. We can't even do the - // substitution below to check without causing - // an ICE. (#18956). - false - } else { - let default = tcx.lift(&default); - substs.and_then(|substs| default.subst(tcx, substs)) - == Some(actual) - } + let mut verbose = false; + let mut num_supplied_defaults = 0; + let mut has_self = false; + let mut num_regions = 0; + let mut num_types = 0; + let mut is_value_path = false; + let fn_trait_kind = ty::tls::with(|tcx| { + // Unfortunately, some kinds of items (e.g., closures) don't have + // generics. So walk back up the find the closest parent that DOES + // have them. + let mut item_def_id = did; + loop { + let key = tcx.def_key(item_def_id); + match key.disambiguated_data.data { + DefPathData::TypeNs(_) => { + break; + } + DefPathData::ValueNs(_) | DefPathData::EnumVariant(_) => { + is_value_path = true; + break; + } + _ => { + // if we're making a symbol for something, there ought + // to be a value or type-def or something in there + // *somewhere* + item_def_id.index = key.parent.unwrap_or_else(|| { + bug!("finding type for {:?}, encountered def-id {:?} with no \ + parent", did, item_def_id); + }); } - None => false } - }).count() - } else { - 0 - } -} - -pub fn parameterized(f: &mut fmt::Formatter, - substs: &subst::Substs, - did: DefId, - ns: Ns, - projections: &[ty::ProjectionPredicate], - get_generics: GG) - -> fmt::Result - where GG: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) - -> Option> -{ - if let (Ns::Value, Some(self_ty)) = (ns, substs.self_ty()) { - write!(f, "<{} as ", self_ty)?; - } + } + let mut generics = tcx.lookup_generics(item_def_id); + let mut path_def_id = did; + verbose = tcx.sess.verbose(); + has_self = generics.has_self; + + let mut child_types = 0; + if let Some(def_id) = generics.parent { + // Methods. 
+ assert!(is_value_path); + child_types = generics.types.len(); + generics = tcx.lookup_generics(def_id); + num_regions = generics.regions.len(); + num_types = generics.types.len(); + + if has_self { + write!(f, "<{} as ", substs.type_at(0))?; + } - let (fn_trait_kind, verbose, item_name) = ty::tls::with(|tcx| { - let (did, item_name) = if ns == Ns::Value { - // Try to get the impl/trait parent, if this is an - // associated value item (method or constant). - tcx.trait_of_item(did).or_else(|| tcx.impl_of_method(did)) - .map_or((did, None), |parent| (parent, Some(tcx.item_name(did)))) + path_def_id = def_id; } else { - (did, None) - }; - write!(f, "{}", tcx.item_path_str(did))?; - Ok((tcx.lang_items.fn_trait_kind(did), tcx.sess.verbose(), item_name)) + item_name = None; + + if is_value_path { + // Functions. + assert_eq!(has_self, false); + } else { + // Types and traits. + num_regions = generics.regions.len(); + num_types = generics.types.len(); + } + } + + if !verbose { + if generics.types.last().map_or(false, |def| def.default.is_some()) { + if let Some(substs) = tcx.lift(&substs) { + let tps = substs.types().rev().skip(child_types); + for (def, actual) in generics.types.iter().rev().zip(tps) { + if def.default.subst(tcx, substs) != Some(actual) { + break; + } + num_supplied_defaults += 1; + } + } + } + } + + write!(f, "{}", tcx.item_path_str(path_def_id))?; + Ok(tcx.lang_items.fn_trait_kind(path_def_id)) })?; if !verbose && fn_trait_kind.is_some() && projections.len() == 1 { let projection_ty = projections[0].ty; - if let TyTuple(ref args) = substs.types.get_slice(subst::TypeSpace)[0].sty { + if let TyTuple(ref args) = substs.type_at(1).sty { return fn_sig(f, args, false, projection_ty); } } @@ -142,13 +172,15 @@ pub fn parameterized(f: &mut fmt::Formatter, } }; - let print_regions = |f: &mut fmt::Formatter, start: &str, regions: &[ty::Region]| { + let print_regions = |f: &mut fmt::Formatter, start: &str, skip, count| { // Don't print any regions if they're all erased. - if regions.iter().all(|r| *r == ty::ReErased) { + let regions = || substs.regions().skip(skip).take(count); + if regions().all(|r: &ty::Region| *r == ty::ReErased) { return Ok(()); } - for region in regions { + for region in regions() { + let region: &ty::Region = region; start_or_continue(f, start, ", ")?; if verbose { write!(f, "{:?}", region)?; @@ -170,23 +202,12 @@ pub fn parameterized(f: &mut fmt::Formatter, Ok(()) }; - print_regions(f, "<", substs.regions.get_slice(subst::TypeSpace))?; - - let num_supplied_defaults = if verbose { - 0 - } else { - ty::tls::with(|tcx| { - if let Some(generics) = get_generics(tcx) { - number_of_supplied_defaults(tcx, substs, subst::TypeSpace, generics) - } else { - 0 - } - }) - }; + print_regions(f, "<", 0, num_regions)?; - let tps = substs.types.get_slice(subst::TypeSpace); + let tps = substs.types().take(num_types - num_supplied_defaults) + .skip(has_self as usize); - for &ty in &tps[..tps.len() - num_supplied_defaults] { + for ty in tps { start_or_continue(f, "<", ", ")?; write!(f, "{}", ty)?; } @@ -201,10 +222,10 @@ pub fn parameterized(f: &mut fmt::Formatter, start_or_continue(f, "", ">")?; // For values, also print their name and type parameters. 
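The `num_supplied_defaults` loop above trims defaulted type parameters when pretty-printing: it walks the parameters from the end and stops at the first one whose supplied argument differs from its default after substitution. A self-contained sketch of just that counting rule, with plain strings standing in for rustc's `Ty` values (a hypothetical helper, not the compiler's API):

```rust
/// Count how many trailing parameters were supplied with exactly their
/// default, so they can be omitted when printing `Foo<...>`.
fn num_supplied_defaults(defaults: &[Option<&str>], actuals: &[&str]) -> usize {
    defaults.iter().rev()
        .zip(actuals.iter().rev())
        .take_while(|&(default, actual)| *default == Some(*actual))
        .count()
}

fn main() {
    // Think `HashMap<K, V, S = RandomState>` instantiated as
    // `HashMap<i32, u32, RandomState>`.
    let defaults = [None, None, Some("RandomState")];
    let actuals = ["i32", "u32", "RandomState"];
    let n = num_supplied_defaults(&defaults, &actuals);
    assert_eq!(n, 1);

    // Print only the parameters that differ from their defaults.
    let shown = &actuals[..actuals.len() - n];
    println!("HashMap<{}>", shown.join(", "));
}
```

Only a trailing run is elided, so an explicit non-default hasher would keep all three parameters in the printed type.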
- if ns == Ns::Value { + if is_value_path { empty.set(true); - if substs.self_ty().is_some() { + if has_self { write!(f, ">")?; } @@ -212,10 +233,10 @@ pub fn parameterized(f: &mut fmt::Formatter, write!(f, "::{}", item_name)?; } - print_regions(f, "::<", substs.regions.get_slice(subst::FnSpace))?; + print_regions(f, "::<", num_regions, usize::MAX)?; // FIXME: consider being smart with defaults here too - for ty in substs.types.get_slice(subst::FnSpace) { + for ty in substs.types().skip(num_types) { start_or_continue(f, "::<", ", ")?; write!(f, "{}", ty)?; } @@ -255,7 +276,7 @@ fn in_binder<'a, 'gcx, 'tcx, T, U>(f: &mut fmt::Formatter, let new_value = tcx.replace_late_bound_regions(&value, |br| { let _ = start_or_continue(f, "for<", ", "); - ty::ReLateBound(ty::DebruijnIndex::new(1), match br { + let br = match br { ty::BrNamed(_, name, _) => { let _ = write!(f, "{}", name); br @@ -269,7 +290,8 @@ fn in_binder<'a, 'gcx, 'tcx, T, U>(f: &mut fmt::Formatter, name, ty::Issue32330::WontChange) } - }) + }; + tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1), br)) }).0; start_or_continue(f, "", "> ")?; @@ -288,7 +310,8 @@ fn in_binder<'a, 'gcx, 'tcx, T, U>(f: &mut fmt::Formatter, /// projection bounds, so we just stuff them altogether. But in /// reality we should eventually sort things out better. #[derive(Clone, Debug)] -struct TraitAndProjections<'tcx>(ty::TraitRef<'tcx>, Vec>); +struct TraitAndProjections<'tcx>(ty::TraitRef<'tcx>, + Vec>); impl<'tcx> TypeFoldable<'tcx> for TraitAndProjections<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { @@ -305,30 +328,32 @@ impl<'tcx> fmt::Display for TraitAndProjections<'tcx> { let TraitAndProjections(ref trait_ref, ref projection_bounds) = *self; parameterized(f, trait_ref.substs, trait_ref.def_id, - Ns::Type, - projection_bounds, - |tcx| Some(tcx.lookup_trait_def(trait_ref.def_id).generics.clone())) + projection_bounds) } } -impl<'tcx> fmt::Display for ty::TraitTy<'tcx> { +impl<'tcx> fmt::Display for ty::TraitObject<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let bounds = &self.bounds; - // Generate the main trait ref, including associated types. ty::tls::with(|tcx| { - let principal = tcx.lift(&self.principal.0) - .expect("could not lift TraitRef for printing"); - let projections = tcx.lift(&bounds.projection_bounds[..]) - .expect("could not lift projections for printing"); - let projections = projections.into_iter().map(|p| p.0).collect(); + // Use a type that can't appear in defaults of type parameters. + let dummy_self = tcx.mk_infer(ty::FreshTy(0)); + + let principal = tcx.lift(&self.principal) + .expect("could not lift TraitRef for printing") + .with_self_ty(tcx, dummy_self).0; + let projections = self.projection_bounds.iter().map(|p| { + tcx.lift(p) + .expect("could not lift projection for printing") + .with_self_ty(tcx, dummy_self).0 + }).collect(); let tap = ty::Binder(TraitAndProjections(principal, projections)); in_binder(f, tcx, &ty::Binder(""), Some(tap)) })?; // Builtin bounds. - for bound in &bounds.builtin_bounds { + for bound in &self.builtin_bounds { write!(f, " + {:?}", bound)?; } @@ -337,7 +362,7 @@ impl<'tcx> fmt::Display for ty::TraitTy<'tcx> { // use thread-local data of some kind? There are also // advantages to just showing the region, since it makes // people aware that it's there. 
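Both the `Display` impl above and the replacement `Debug` impl below assemble strings such as `Foo + Send + 'a` with a small closure that remembers whether anything has been written yet before emitting a ` + ` separator. The same idiom in standalone form, with a made-up `Bounds` type rather than the rustc one:

```rust
use std::fmt;

/// A toy stand-in for a trait-object type: one principal trait plus extra bounds.
struct Bounds {
    principal: &'static str,
    builtin: Vec<&'static str>,
    region: Option<&'static str>,
}

impl fmt::Display for Bounds {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut empty = true;
        // Write ` + ` before every item except the first one.
        let mut maybe_continue = |f: &mut fmt::Formatter| {
            if empty {
                empty = false;
                Ok(())
            } else {
                write!(f, " + ")
            }
        };

        maybe_continue(f)?;
        write!(f, "{}", self.principal)?;
        for bound in &self.builtin {
            maybe_continue(f)?;
            write!(f, "{}", bound)?;
        }
        if let Some(r) = self.region {
            maybe_continue(f)?;
            write!(f, "{}", r)?;
        }
        Ok(())
    }
}

fn main() {
    let b = Bounds {
        principal: "Iterator<Item=u8>",
        builtin: vec!["Send"],
        region: Some("'a"),
    };
    assert_eq!(b.to_string(), "Iterator<Item=u8> + Send + 'a");
    println!("{}", b);
}
```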
- let bound = bounds.region_bound.to_string(); + let bound = self.region_bound.to_string(); if !bound.is_empty() { write!(f, " + {}", bound)?; } @@ -348,19 +373,19 @@ impl<'tcx> fmt::Display for ty::TraitTy<'tcx> { impl<'tcx> fmt::Debug for ty::TypeParameterDef<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TypeParameterDef({}, {:?}, {:?}/{})", + write!(f, "TypeParameterDef({}, {:?}, {})", self.name, self.def_id, - self.space, self.index) + self.index) } } -impl fmt::Debug for ty::RegionParameterDef { +impl<'tcx> fmt::Debug for ty::RegionParameterDef<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "RegionParameterDef({}, {:?}, {:?}/{}, {:?})", + write!(f, "RegionParameterDef({}, {:?}, {}, {:?})", self.name, self.def_id, - self.space, self.index, + self.index, self.bounds) } } @@ -379,13 +404,6 @@ impl<'tcx> fmt::Display for ty::TypeAndMut<'tcx> { } } -impl<'tcx> fmt::Debug for subst::Substs<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Substs[types={:?}, regions={:?}]", - self.types, self.regions) - } -} - impl<'tcx> fmt::Debug for ty::ItemSubsts<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "ItemSubsts({:?})", self.substs) @@ -397,10 +415,20 @@ impl<'tcx> fmt::Debug for ty::TraitRef<'tcx> { // when printing out the debug representation, we don't need // to enumerate the `for<...>` etc because the debruijn index // tells you everything you need to know. - match self.substs.self_ty() { - None => write!(f, "{}", *self), - Some(self_ty) => write!(f, "<{:?} as {}>", self_ty, *self) - } + write!(f, "<{:?} as {}>", self.self_ty(), *self) + } +} + +impl<'tcx> fmt::Debug for ty::ExistentialTraitRef<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + ty::tls::with(|tcx| { + let dummy_self = tcx.mk_infer(ty::FreshTy(0)); + + let trait_ref = tcx.lift(&ty::Binder(*self)) + .expect("could not lift TraitRef for printing") + .with_self_ty(tcx, dummy_self).0; + parameterized(f, trait_ref.substs, trait_ref.def_id, &[]) + }) } } @@ -448,11 +476,38 @@ impl<'tcx> fmt::Debug for ty::adjustment::AutoDerefRef<'tcx> { } } -impl<'tcx> fmt::Debug for ty::TraitTy<'tcx> { +impl<'tcx> fmt::Debug for ty::TraitObject<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TraitTy({:?},{:?})", - self.principal, - self.bounds) + let mut empty = true; + let mut maybe_continue = |f: &mut fmt::Formatter| { + if empty { + empty = false; + Ok(()) + } else { + write!(f, " + ") + } + }; + + maybe_continue(f)?; + write!(f, "{:?}", self.principal)?; + + let region_str = format!("{:?}", self.region_bound); + if !region_str.is_empty() { + maybe_continue(f)?; + write!(f, "{}", region_str)?; + } + + for bound in &self.builtin_bounds { + maybe_continue(f)?; + write!(f, "{:?}", bound)?; + } + + for projection_bound in &self.projection_bounds { + maybe_continue(f)?; + write!(f, "{:?}", projection_bound)?; + } + + Ok(()) } } @@ -460,9 +515,6 @@ impl<'tcx> fmt::Debug for ty::Predicate<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ty::Predicate::Trait(ref a) => write!(f, "{:?}", a), - ty::Predicate::Rfc1592(ref a) => { - write!(f, "RFC1592({:?})", a) - } ty::Predicate::Equate(ref pair) => write!(f, "{:?}", pair), ty::Predicate::RegionOutlives(ref pair) => write!(f, "{:?}", pair), ty::Predicate::TypeOutlives(ref pair) => write!(f, "{:?}", pair), @@ -509,8 +561,7 @@ impl fmt::Debug for ty::Region { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self 
{ ty::ReEarlyBound(ref data) => { - write!(f, "ReEarlyBound({:?}, {}, {})", - data.space, + write!(f, "ReEarlyBound({}, {})", data.index, data.name) } @@ -573,7 +624,7 @@ impl<'tcx> fmt::Debug for ty::ParameterEnvironment<'tcx> { } } -impl<'tcx> fmt::Debug for ty::ObjectLifetimeDefault { +impl<'tcx> fmt::Debug for ty::ObjectLifetimeDefault<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ty::ObjectLifetimeDefault::Ambiguous => write!(f, "Ambiguous"), @@ -629,13 +680,6 @@ impl fmt::Debug for ty::Variance { } } -impl fmt::Debug for ty::ItemVariances { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "ItemVariances(types={:?}, regions={:?})", - self.types, self.regions) - } -} - impl<'tcx> fmt::Debug for ty::GenericPredicates<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "GenericPredicates({:?})", self.predicates) @@ -668,38 +712,6 @@ impl<'tcx> fmt::Display for ty::FnSig<'tcx> { } } -impl<'tcx> fmt::Debug for ty::ExistentialBounds<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut empty = true; - let mut maybe_continue = |f: &mut fmt::Formatter| { - if empty { - empty = false; - Ok(()) - } else { - write!(f, " + ") - } - }; - - let region_str = format!("{:?}", self.region_bound); - if !region_str.is_empty() { - maybe_continue(f)?; - write!(f, "{}", region_str)?; - } - - for bound in &self.builtin_bounds { - maybe_continue(f)?; - write!(f, "{:?}", bound)?; - } - - for projection_bound in &self.projection_bounds { - maybe_continue(f)?; - write!(f, "{:?}", projection_bound)?; - } - - Ok(()) - } -} - impl fmt::Display for ty::BuiltinBounds { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut bounds = self.iter(); @@ -800,13 +812,14 @@ impl<'tcx> fmt::Display for ty::Binder> { } } -impl<'tcx> fmt::Display for ty::Binder, ty::Region>> { +impl<'tcx> fmt::Display for ty::Binder, &'tcx ty::Region>> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ty::tls::with(|tcx| in_binder(f, tcx, self, tcx.lift(self))) } } -impl fmt::Display for ty::Binder> { +impl<'tcx> fmt::Display for ty::Binder> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ty::tls::with(|tcx| in_binder(f, tcx, self, tcx.lift(self))) } @@ -814,8 +827,7 @@ impl fmt::Display for ty::Binder> impl<'tcx> fmt::Display for ty::TraitRef<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - parameterized(f, self.substs, self.def_id, Ns::Type, &[], - |tcx| Some(tcx.lookup_trait_def(self.def_id).generics.clone())) + parameterized(f, self.substs, self.def_id, &[]) } } @@ -868,9 +880,7 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> { } write!(f, "{} {{", bare_fn.sig.0)?; - parameterized( - f, substs, def_id, Ns::Value, &[], - |tcx| tcx.opt_lookup_item_type(def_id).map(|t| t.generics))?; + parameterized(f, substs, def_id, &[])?; write!(f, "}}") } TyFnPtr(ref bare_fn) => { @@ -887,18 +897,13 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> { TyInfer(infer_ty) => write!(f, "{}", infer_ty), TyError => write!(f, "[type error]"), TyParam(ref param_ty) => write!(f, "{}", param_ty), - TyEnum(def, substs) | TyStruct(def, substs) => { + TyAdt(def, substs) => { ty::tls::with(|tcx| { if def.did.is_local() && !tcx.tcache.borrow().contains_key(&def.did) { write!(f, "{}<..>", tcx.item_path_str(def.did)) } else { - parameterized( - f, substs, def.did, Ns::Type, &[], - |tcx| { - tcx.opt_lookup_item_type(def.did). 
- map(|t| t.generics) - }) + parameterized(f, substs, def.did, &[]) } }) } @@ -910,14 +915,14 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> { // by looking up the projections associated with the def_id. let item_predicates = tcx.lookup_predicates(def_id); let substs = tcx.lift(&substs).unwrap_or_else(|| { - tcx.mk_substs(subst::Substs::empty()) + Substs::empty(tcx) }); let bounds = item_predicates.instantiate(tcx, substs); let mut first = true; let mut is_sized = false; write!(f, "impl")?; - for predicate in bounds.predicates.into_vec() { + for predicate in bounds.predicates { if let Some(trait_ref) = predicate.to_opt_poly_trait_ref() { // Don't print +Sized, but rather +?Sized if absent. if Some(trait_ref.def_id()) == tcx.lang_items.sized_trait() { @@ -944,7 +949,8 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> { let mut sep = " "; tcx.with_freevars(node_id, |freevars| { for (freevar, upvar_ty) in freevars.iter().zip(substs.upvar_tys) { - let node_id = freevar.def.var_id(); + let def_id = freevar.def.def_id(); + let node_id = tcx.map.as_local_node_id(def_id).unwrap(); write!(f, "{}{}:{}", sep, @@ -988,7 +994,7 @@ impl fmt::Debug for ty::UpvarId { } } -impl fmt::Debug for ty::UpvarBorrow { +impl<'tcx> fmt::Debug for ty::UpvarBorrow<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "UpvarBorrow({:?}, {:?})", self.kind, self.region) @@ -1012,7 +1018,7 @@ impl fmt::Display for ty::InferTy { } } -impl fmt::Display for ty::ExplicitSelfCategory { +impl<'tcx> fmt::Display for ty::ExplicitSelfCategory<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str(match *self { ty::ExplicitSelfCategory::Static => "static", @@ -1034,7 +1040,7 @@ impl fmt::Display for ty::ParamTy { impl fmt::Debug for ty::ParamTy { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}/{:?}.{}", self, self.space, self.idx) + write!(f, "{}/#{}", self, self.idx) } } @@ -1103,7 +1109,6 @@ impl<'tcx> fmt::Display for ty::Predicate<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ty::Predicate::Trait(ref data) => write!(f, "{}", data), - ty::Predicate::Rfc1592(ref data) => write!(f, "{}", data), ty::Predicate::Equate(ref predicate) => write!(f, "{}", predicate), ty::Predicate::RegionOutlives(ref predicate) => write!(f, "{}", predicate), ty::Predicate::TypeOutlives(ref predicate) => write!(f, "{}", predicate), diff --git a/src/librustc_back/lib.rs b/src/librustc_back/lib.rs index 6a7bc51d15..92b03b5ca2 100644 --- a/src/librustc_back/lib.rs +++ b/src/librustc_back/lib.rs @@ -37,7 +37,7 @@ #![feature(rustc_private)] #![feature(staged_api)] #![feature(step_by)] -#![feature(question_mark)] +#![cfg_attr(stage0, feature(question_mark))] #![cfg_attr(test, feature(test, rand))] extern crate syntax; @@ -46,7 +46,6 @@ extern crate serialize; #[macro_use] extern crate log; pub mod tempdir; -pub mod rpath; pub mod sha2; pub mod target; pub mod slice; diff --git a/src/librustc_back/target/aarch64_apple_ios.rs b/src/librustc_back/target/aarch64_apple_ios.rs index 6530ccb063..660ed0ac7b 100644 --- a/src/librustc_back/target/aarch64_apple_ios.rs +++ b/src/librustc_back/target/aarch64_apple_ios.rs @@ -12,7 +12,7 @@ use target::{Target, TargetOptions, TargetResult}; use super::apple_ios_base::{opts, Arch}; pub fn target() -> TargetResult { - let base = try!(opts(Arch::Arm64)); + let base = opts(Arch::Arm64)?; Ok(Target { llvm_target: "arm64-apple-ios".to_string(), target_endian: "little".to_string(), diff --git 
a/src/librustc_back/target/apple_ios_base.rs b/src/librustc_back/target/apple_ios_base.rs index 8bd9feabdb..17492b8bdc 100644 --- a/src/librustc_back/target/apple_ios_base.rs +++ b/src/librustc_back/target/apple_ios_base.rs @@ -68,7 +68,7 @@ fn build_pre_link_args(arch: Arch) -> Result, String> { let arch_name = arch.to_string(); - let sdk_root = try!(get_sdk_root(sdk_name)); + let sdk_root = get_sdk_root(sdk_name)?; Ok(vec!["-arch".to_string(), arch_name.to_string(), "-Wl,-syslibroot".to_string(), sdk_root]) @@ -85,7 +85,7 @@ fn target_cpu(arch: Arch) -> String { } pub fn opts(arch: Arch) -> Result { - let pre_link_args = try!(build_pre_link_args(arch)); + let pre_link_args = build_pre_link_args(arch)?; Ok(TargetOptions { cpu: target_cpu(arch), dynamic_linking: false, diff --git a/src/librustc_back/target/armv7_apple_ios.rs b/src/librustc_back/target/armv7_apple_ios.rs index a806204d0a..71533a09b1 100644 --- a/src/librustc_back/target/armv7_apple_ios.rs +++ b/src/librustc_back/target/armv7_apple_ios.rs @@ -12,7 +12,7 @@ use target::{Target, TargetOptions, TargetResult}; use super::apple_ios_base::{opts, Arch}; pub fn target() -> TargetResult { - let base = try!(opts(Arch::Armv7)); + let base = opts(Arch::Armv7)?; Ok(Target { llvm_target: "armv7-apple-ios".to_string(), target_endian: "little".to_string(), diff --git a/src/librustc_back/target/armv7_unknown_linux_gnueabihf.rs b/src/librustc_back/target/armv7_unknown_linux_gnueabihf.rs index 52269f0cd4..7e0306a03e 100644 --- a/src/librustc_back/target/armv7_unknown_linux_gnueabihf.rs +++ b/src/librustc_back/target/armv7_unknown_linux_gnueabihf.rs @@ -23,8 +23,9 @@ pub fn target() -> TargetResult { target_vendor: "unknown".to_string(), options: TargetOptions { - features: "+v7,+vfp3,+neon".to_string(), - cpu: "cortex-a8".to_string(), + // Info about features at https://wiki.debian.org/ArmHardFloatPort + features: "+v7,+vfp3,+d16,+thumb2".to_string(), + cpu: "generic".to_string(), max_atomic_width: 64, .. base } diff --git a/src/librustc_back/target/armv7s_apple_ios.rs b/src/librustc_back/target/armv7s_apple_ios.rs index aaa3570fa6..f24b996991 100644 --- a/src/librustc_back/target/armv7s_apple_ios.rs +++ b/src/librustc_back/target/armv7s_apple_ios.rs @@ -12,7 +12,7 @@ use target::{Target, TargetOptions, TargetResult}; use super::apple_ios_base::{opts, Arch}; pub fn target() -> TargetResult { - let base = try!(opts(Arch::Armv7s)); + let base = opts(Arch::Armv7s)?; Ok(Target { llvm_target: "armv7s-apple-ios".to_string(), target_endian: "little".to_string(), diff --git a/src/librustc_back/target/asmjs_unknown_emscripten.rs b/src/librustc_back/target/asmjs_unknown_emscripten.rs index 07eb191471..9ccfdbb129 100644 --- a/src/librustc_back/target/asmjs_unknown_emscripten.rs +++ b/src/librustc_back/target/asmjs_unknown_emscripten.rs @@ -18,7 +18,6 @@ pub fn target() -> Result { dynamic_linking: false, executables: true, exe_suffix: ".js".to_string(), - no_compiler_rt: true, linker_is_gnu: true, allow_asm: false, obj_is_bitcode: true, diff --git a/src/librustc_back/target/haiku_base.rs b/src/librustc_back/target/haiku_base.rs new file mode 100644 index 0000000000..5e319ba183 --- /dev/null +++ b/src/librustc_back/target/haiku_base.rs @@ -0,0 +1,23 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +use target::TargetOptions; +use std::default::Default; + +pub fn opts() -> TargetOptions { + TargetOptions { + linker: "cc".to_string(), + dynamic_linking: true, + executables: true, + has_rpath: true, + linker_is_gnu: true, + .. Default::default() + } +} diff --git a/src/librustc_back/target/i386_apple_ios.rs b/src/librustc_back/target/i386_apple_ios.rs index f391d4118e..94146fe9d9 100644 --- a/src/librustc_back/target/i386_apple_ios.rs +++ b/src/librustc_back/target/i386_apple_ios.rs @@ -12,7 +12,7 @@ use target::{Target, TargetOptions, TargetResult}; use super::apple_ios_base::{opts, Arch}; pub fn target() -> TargetResult { - let base = try!(opts(Arch::I386)); + let base = opts(Arch::I386)?; Ok(Target { llvm_target: "i386-apple-ios".to_string(), target_endian: "little".to_string(), diff --git a/src/librustc_back/target/i586_pc_windows_msvc.rs b/src/librustc_back/target/i586_pc_windows_msvc.rs index 445ee6c412..9b88cde598 100644 --- a/src/librustc_back/target/i586_pc_windows_msvc.rs +++ b/src/librustc_back/target/i586_pc_windows_msvc.rs @@ -11,7 +11,7 @@ use target::TargetResult; pub fn target() -> TargetResult { - let mut base = try!(super::i686_pc_windows_msvc::target()); + let mut base = super::i686_pc_windows_msvc::target()?; base.options.cpu = "pentium".to_string(); base.llvm_target = "i586-pc-windows-msvc".to_string(); Ok(base) diff --git a/src/librustc_back/target/i586_unknown_linux_gnu.rs b/src/librustc_back/target/i586_unknown_linux_gnu.rs index 1ca8606149..40fb4a67ac 100644 --- a/src/librustc_back/target/i586_unknown_linux_gnu.rs +++ b/src/librustc_back/target/i586_unknown_linux_gnu.rs @@ -11,7 +11,7 @@ use target::TargetResult; pub fn target() -> TargetResult { - let mut base = try!(super::i686_unknown_linux_gnu::target()); + let mut base = super::i686_unknown_linux_gnu::target()?; base.options.cpu = "pentium".to_string(); base.llvm_target = "i586-unknown-linux-gnu".to_string(); Ok(base) diff --git a/src/librustc_back/target/i686_unknown_haiku.rs b/src/librustc_back/target/i686_unknown_haiku.rs new file mode 100644 index 0000000000..862016704f --- /dev/null +++ b/src/librustc_back/target/i686_unknown_haiku.rs @@ -0,0 +1,30 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use target::{Target, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::haiku_base::opts(); + base.cpu = "pentium4".to_string(); + base.max_atomic_width = 64; + base.pre_link_args.push("-m32".to_string()); + + Ok(Target { + llvm_target: "i686-unknown-haiku".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(), + arch: "x86".to_string(), + target_os: "haiku".to_string(), + target_env: "".to_string(), + target_vendor: "unknown".to_string(), + options: base, + }) +} diff --git a/src/librustc_back/target/le32_unknown_nacl.rs b/src/librustc_back/target/le32_unknown_nacl.rs index 25132f8a04..9ba6591f58 100644 --- a/src/librustc_back/target/le32_unknown_nacl.rs +++ b/src/librustc_back/target/le32_unknown_nacl.rs @@ -22,7 +22,6 @@ pub fn target() -> TargetResult { dynamic_linking: false, executables: true, exe_suffix: ".pexe".to_string(), - no_compiler_rt: false, linker_is_gnu: true, allow_asm: false, max_atomic_width: 32, diff --git a/src/librustc_back/target/mips64_unknown_linux_gnuabi64.rs b/src/librustc_back/target/mips64_unknown_linux_gnuabi64.rs new file mode 100644 index 0000000000..7e45b32065 --- /dev/null +++ b/src/librustc_back/target/mips64_unknown_linux_gnuabi64.rs @@ -0,0 +1,31 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + Ok(Target { + llvm_target: "mips64-unknown-linux-gnuabi64".to_string(), + target_endian: "big".to_string(), + target_pointer_width: "64".to_string(), + data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(), + arch: "mips64".to_string(), + target_os: "linux".to_string(), + target_env: "gnu".to_string(), + target_vendor: "unknown".to_string(), + options: TargetOptions { + // NOTE(mips64r2) matches C toolchain + cpu: "mips64r2".to_string(), + features: "+mips64r2".to_string(), + max_atomic_width: 64, + ..super::linux_base::opts() + }, + }) +} diff --git a/src/librustc_back/target/mips64el_unknown_linux_gnuabi64.rs b/src/librustc_back/target/mips64el_unknown_linux_gnuabi64.rs new file mode 100644 index 0000000000..338a5da1e1 --- /dev/null +++ b/src/librustc_back/target/mips64el_unknown_linux_gnuabi64.rs @@ -0,0 +1,31 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + Ok(Target { + llvm_target: "mips64el-unknown-linux-gnuabi64".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(), + arch: "mips64".to_string(), + target_os: "linux".to_string(), + target_env: "gnu".to_string(), + target_vendor: "unknown".to_string(), + options: TargetOptions { + // NOTE(mips64r2) matches C toolchain + cpu: "mips64r2".to_string(), + features: "+mips64r2".to_string(), + max_atomic_width: 64, + ..super::linux_base::opts() + }, + }) +} diff --git a/src/librustc_back/target/mips_unknown_linux_uclibc.rs b/src/librustc_back/target/mips_unknown_linux_uclibc.rs new file mode 100644 index 0000000000..529bd31039 --- /dev/null +++ b/src/librustc_back/target/mips_unknown_linux_uclibc.rs @@ -0,0 +1,30 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + Ok(Target { + llvm_target: "mips-unknown-linux-uclibc".to_string(), + target_endian: "big".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), + arch: "mips".to_string(), + target_os: "linux".to_string(), + target_env: "uclibc".to_string(), + target_vendor: "unknown".to_string(), + options: TargetOptions { + cpu: "mips32r2".to_string(), + features: "+mips32r2,+soft-float".to_string(), + max_atomic_width: 32, + ..super::linux_base::opts() + }, + }) +} diff --git a/src/librustc_back/target/mipsel_unknown_linux_uclibc.rs b/src/librustc_back/target/mipsel_unknown_linux_uclibc.rs new file mode 100644 index 0000000000..1040a0fbe1 --- /dev/null +++ b/src/librustc_back/target/mipsel_unknown_linux_uclibc.rs @@ -0,0 +1,31 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
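A mechanical change running through these target files (and much of the rest of the patch) is replacing `try!(expr)` with the `?` operator, which is stable as of this release; only the `stage0` bootstrap build still needs the `question_mark` feature gate. The two forms propagate errors identically, roughly as in this sketch:

```rust
use std::num::ParseIntError;

// Old style: the try! macro returns early on Err.
fn double_old(s: &str) -> Result<i32, ParseIntError> {
    let n = try!(s.parse::<i32>());
    Ok(n * 2)
}

// New style: `?` performs the same propagation with less noise.
fn double_new(s: &str) -> Result<i32, ParseIntError> {
    let n = s.parse::<i32>()?;
    Ok(n * 2)
}

fn main() {
    assert_eq!(double_old("21"), Ok(42));
    assert_eq!(double_new("21"), Ok(42));
    assert!(double_new("not a number").is_err());
}
```

`try!` keeps working, so the conversion can be done file by file.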
+ +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + Ok(Target { + llvm_target: "mipsel-unknown-linux-uclibc".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), + arch: "mips".to_string(), + target_os: "linux".to_string(), + target_env: "uclibc".to_string(), + target_vendor: "unknown".to_string(), + + options: TargetOptions { + cpu: "mips32".to_string(), + features: "+mips32,+soft-float".to_string(), + max_atomic_width: 32, + ..super::linux_base::opts() + }, + }) +} diff --git a/src/librustc_back/target/mod.rs b/src/librustc_back/target/mod.rs index 18686e3f1d..087078021a 100644 --- a/src/librustc_back/target/mod.rs +++ b/src/librustc_back/target/mod.rs @@ -56,6 +56,7 @@ mod apple_ios_base; mod bitrig_base; mod dragonfly_base; mod freebsd_base; +mod haiku_base; mod linux_base; mod linux_musl_base; mod openbsd_base; @@ -77,12 +78,12 @@ macro_rules! supported_targets { match target { $( $triple => { - let mut t = try!($module::target()); + let mut t = $module::target()?; t.options.is_builtin = true; // round-trip through the JSON parser to ensure at // run-time that the parser works correctly - t = try!(Target::from_json(t.to_json())); + t = Target::from_json(t.to_json())?; debug!("Got builtin target: {:?}", t); Ok(t) }, @@ -128,10 +129,13 @@ supported_targets! { ("i686-unknown-linux-gnu", i686_unknown_linux_gnu), ("i586-unknown-linux-gnu", i586_unknown_linux_gnu), ("mips-unknown-linux-gnu", mips_unknown_linux_gnu), + ("mips64-unknown-linux-gnuabi64", mips64_unknown_linux_gnuabi64), + ("mips64el-unknown-linux-gnuabi64", mips64el_unknown_linux_gnuabi64), ("mipsel-unknown-linux-gnu", mipsel_unknown_linux_gnu), ("powerpc-unknown-linux-gnu", powerpc_unknown_linux_gnu), ("powerpc64-unknown-linux-gnu", powerpc64_unknown_linux_gnu), ("powerpc64le-unknown-linux-gnu", powerpc64le_unknown_linux_gnu), + ("s390x-unknown-linux-gnu", s390x_unknown_linux_gnu), ("arm-unknown-linux-gnueabi", arm_unknown_linux_gnueabi), ("arm-unknown-linux-gnueabihf", arm_unknown_linux_gnueabihf), ("arm-unknown-linux-musleabi", arm_unknown_linux_musleabi), @@ -143,6 +147,8 @@ supported_targets! { ("i686-unknown-linux-musl", i686_unknown_linux_musl), ("mips-unknown-linux-musl", mips_unknown_linux_musl), ("mipsel-unknown-linux-musl", mipsel_unknown_linux_musl), + ("mips-unknown-linux-uclibc", mips_unknown_linux_uclibc), + ("mipsel-unknown-linux-uclibc", mipsel_unknown_linux_uclibc), ("i686-linux-android", i686_linux_android), ("arm-linux-androideabi", arm_linux_androideabi), @@ -160,6 +166,9 @@ supported_targets! { ("x86_64-unknown-netbsd", x86_64_unknown_netbsd), ("x86_64-rumprun-netbsd", x86_64_rumprun_netbsd), + ("i686_unknown_haiku", i686_unknown_haiku), + ("x86_64_unknown_haiku", x86_64_unknown_haiku), + ("x86_64-apple-darwin", x86_64_apple_darwin), ("i686-apple-darwin", i686_apple_darwin), @@ -301,9 +310,6 @@ pub struct TargetOptions { pub allows_weak_linkage: bool, /// Whether the linker support rpaths or not. Defaults to false. pub has_rpath: bool, - /// Whether to disable linking to compiler-rt. Defaults to false, as LLVM - /// will emit references to the functions that compiler-rt provides. - pub no_compiler_rt: bool, /// Whether to disable linking to the default libraries, typically corresponds /// to `-nodefaultlibs`. Defaults to true. 
pub no_default_libraries: bool, @@ -376,7 +382,6 @@ impl Default for TargetOptions { linker_is_gnu: false, allows_weak_linkage: true, has_rpath: false, - no_compiler_rt: false, no_default_libraries: true, position_independent_executables: false, pre_link_objects_exe: Vec::new(), @@ -437,12 +442,12 @@ impl Target { }; let mut base = Target { - llvm_target: try!(get_req_field("llvm-target")), - target_endian: try!(get_req_field("target-endian")), - target_pointer_width: try!(get_req_field("target-pointer-width")), - data_layout: try!(get_req_field("data-layout")), - arch: try!(get_req_field("arch")), - target_os: try!(get_req_field("os")), + llvm_target: get_req_field("llvm-target")?, + target_endian: get_req_field("target-endian")?, + target_pointer_width: get_req_field("target-pointer-width")?, + data_layout: get_req_field("data-layout")?, + arch: get_req_field("arch")?, + target_os: get_req_field("os")?, target_env: get_opt_field("env", ""), target_vendor: get_opt_field("vendor", "unknown"), options: Default::default(), @@ -519,7 +524,6 @@ impl Target { key!(linker_is_gnu, bool); key!(allows_weak_linkage, bool); key!(has_rpath, bool); - key!(no_compiler_rt, bool); key!(no_default_libraries, bool); key!(position_independent_executables, bool); key!(archive_format); @@ -662,7 +666,6 @@ impl ToJson for Target { target_option_val!(linker_is_gnu); target_option_val!(allows_weak_linkage); target_option_val!(has_rpath); - target_option_val!(no_compiler_rt); target_option_val!(no_default_libraries); target_option_val!(position_independent_executables); target_option_val!(archive_format); diff --git a/src/librustc_back/target/s390x_unknown_linux_gnu.rs b/src/librustc_back/target/s390x_unknown_linux_gnu.rs new file mode 100644 index 0000000000..79f2d290e3 --- /dev/null +++ b/src/librustc_back/target/s390x_unknown_linux_gnu.rs @@ -0,0 +1,34 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::{Target, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::linux_base::opts(); + // z10 is the oldest CPU supported by LLVM + base.cpu = "z10".to_string(); + // FIXME: The data_layout string below and the ABI implementation in + // cabi_s390x.rs are for now hard-coded to assume the no-vector ABI. + // Pass the -vector feature string to LLVM to respect this assumption. 
+ base.features = "-vector".to_string(); + base.max_atomic_width = 64; + + Ok(Target { + llvm_target: "s390x-unknown-linux-gnu".to_string(), + target_endian: "big".to_string(), + target_pointer_width: "64".to_string(), + data_layout: "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64".to_string(), + arch: "s390x".to_string(), + target_os: "linux".to_string(), + target_env: "gnu".to_string(), + target_vendor: "unknown".to_string(), + options: base, + }) +} diff --git a/src/librustc_back/target/x86_64_apple_ios.rs b/src/librustc_back/target/x86_64_apple_ios.rs index 4afc9bcb94..3b8b636b6d 100644 --- a/src/librustc_back/target/x86_64_apple_ios.rs +++ b/src/librustc_back/target/x86_64_apple_ios.rs @@ -12,7 +12,7 @@ use target::{Target, TargetOptions, TargetResult}; use super::apple_ios_base::{opts, Arch}; pub fn target() -> TargetResult { - let base = try!(opts(Arch::X86_64)); + let base = opts(Arch::X86_64)?; Ok(Target { llvm_target: "x86_64-apple-ios".to_string(), target_endian: "little".to_string(), diff --git a/src/librustc_back/target/x86_64_unknown_haiku.rs b/src/librustc_back/target/x86_64_unknown_haiku.rs new file mode 100644 index 0000000000..171e88cee5 --- /dev/null +++ b/src/librustc_back/target/x86_64_unknown_haiku.rs @@ -0,0 +1,30 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::{Target, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::haiku_base::opts(); + base.cpu = "x86-64".to_string(); + base.max_atomic_width = 64; + base.pre_link_args.push("-m64".to_string()); + + Ok(Target { + llvm_target: "x86_64-unknown-haiku".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), + arch: "x86_64".to_string(), + target_os: "haiku".to_string(), + target_env: "".to_string(), + target_vendor: "unknown".to_string(), + options: base, + }) +} diff --git a/src/librustc_bitflags/lib.rs b/src/librustc_bitflags/lib.rs index afc2e04d44..e65d112430 100644 --- a/src/librustc_bitflags/lib.rs +++ b/src/librustc_bitflags/lib.rs @@ -201,7 +201,7 @@ macro_rules! bitflags { !(*self & other).is_empty() } - /// Returns `true` all of the flags in `other` are contained within `self`. + /// Returns `true` if all of the flags in `other` are contained within `self`. #[inline] pub fn contains(&self, other: $BitFlags) -> bool { (*self & other) == other @@ -291,7 +291,8 @@ macro_rules! bitflags { #[cfg(test)] #[allow(non_upper_case_globals)] mod tests { - use std::hash::{Hash, Hasher, SipHasher}; + use std::hash::{Hash, Hasher}; + use std::collections::hash_map::DefaultHasher; use std::option::Option::{None, Some}; bitflags! 
{ @@ -492,7 +493,7 @@ mod tests { } fn hash(t: &T) -> u64 { - let mut s = SipHasher::new(); + let mut s = DefaultHasher::new(); t.hash(&mut s); s.finish() } diff --git a/src/librustc_borrowck/borrowck/check_loans.rs b/src/librustc_borrowck/borrowck/check_loans.rs index 9cae270984..089733da53 100644 --- a/src/librustc_borrowck/borrowck/check_loans.rs +++ b/src/librustc_borrowck/borrowck/check_loans.rs @@ -56,7 +56,7 @@ fn owned_ptr_base_path<'a, 'tcx>(loan_path: &'a LoanPath<'tcx>) -> &'a LoanPath< } } LpDowncast(ref lp_base, _) | - LpExtend(ref lp_base, _, _) => helper(&lp_base) + LpExtend(ref lp_base, ..) => helper(&lp_base) } } } @@ -80,7 +80,7 @@ fn owned_ptr_base_path_rc<'tcx>(loan_path: &Rc>) -> Rc helper(lp_base) + LpExtend(ref lp_base, ..) => helper(lp_base) } } } @@ -126,7 +126,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { borrow_id: ast::NodeId, borrow_span: Span, cmt: mc::cmt<'tcx>, - loan_region: ty::Region, + loan_region: &'tcx ty::Region, bk: ty::BorrowKind, loan_cause: euv::LoanCause) { @@ -312,7 +312,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { break; } LpDowncast(ref lp_base, _) | - LpExtend(ref lp_base, _, _) => { + LpExtend(ref lp_base, ..) => { loan_path = &lp_base; } } @@ -542,7 +542,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { err } - (_, _) => { + (..) => { let mut err = struct_span_err!(self.bccx, new_loan.span, E0502, "cannot borrow `{}`{} as {} because \ {} is also borrowed as {}{}", @@ -647,10 +647,13 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { struct_span_err!(self.bccx, span, E0503, "cannot use `{}` because it was mutably borrowed", &self.bccx.loan_path_to_string(copy_path)) - .span_note(loan_span, + .span_label(loan_span, &format!("borrow of `{}` occurs here", &self.bccx.loan_path_to_string(&loan_path)) ) + .span_label(span, + &format!("use of borrowed `{}`", + &self.bccx.loan_path_to_string(&loan_path))) .emit(); } } @@ -793,7 +796,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { } LpExtend(ref lp_base, _, LpInterior(_, InteriorField(_))) => { match lp_base.to_type().sty { - ty::TyStruct(def, _) | ty::TyEnum(def, _) if def.has_dtor() => { + ty::TyAdt(def, _) if def.has_dtor() => { // In the case where the owner implements drop, then // the path must be initialized to prevent a case of // partial reinitialization diff --git a/src/librustc_borrowck/borrowck/fragments.rs b/src/librustc_borrowck/borrowck/fragments.rs index d3d6fa9eb5..515868c460 100644 --- a/src/librustc_borrowck/borrowck/fragments.rs +++ b/src/librustc_borrowck/borrowck/fragments.rs @@ -21,13 +21,12 @@ use borrowck::LoanPathElem::{LpDeref, LpInterior}; use borrowck::move_data::InvalidMovePathIndex; use borrowck::move_data::{MoveData, MovePathIndex}; use rustc::hir::def_id::{DefId}; -use rustc::ty::{self, TyCtxt}; +use rustc::ty::{self, AdtKind, TyCtxt}; use rustc::middle::mem_categorization as mc; use std::mem; use std::rc::Rc; use syntax::ast; -use syntax::attr::AttrMetaMethods; use syntax_pos::{Span, DUMMY_SP}; #[derive(PartialEq, Eq, PartialOrd, Ord)] @@ -366,9 +365,9 @@ fn add_fragment_siblings<'a, 'tcx>(this: &MoveData<'tcx>, } // *LV for unsafe and borrowed pointers do not consume their loan path, so stop here. 
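Many of the borrowck hunks above and below replace runs of wildcards such as `LpExtend(ref lp_base, _, _)` with the `..` form, which ignores however many trailing fields remain. A minimal illustration with a made-up enum (not the compiler's `LoanPath`):

```rust
// Hypothetical enum standing in for the borrowck LoanPath/Categorization types.
enum Path {
    Base(u32),
    Extend(Box<Path>, bool, &'static str),
}

fn base_id(p: &Path) -> u32 {
    match *p {
        Path::Base(id) => id,
        // `..` matches the remaining fields without naming each one,
        // so this arm keeps compiling if fields are added later.
        Path::Extend(ref base, ..) => base_id(base),
    }
}

fn main() {
    let p = Path::Extend(Box::new(Path::Base(7)), true, "field");
    assert_eq!(base_id(&p), 7);
}
```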
- LpExtend(_, _, LpDeref(mc::UnsafePtr(..))) | - LpExtend(_, _, LpDeref(mc::Implicit(..))) | - LpExtend(_, _, LpDeref(mc::BorrowedPtr(..))) => {} + LpExtend(.., LpDeref(mc::UnsafePtr(..))) | + LpExtend(.., LpDeref(mc::Implicit(..))) | + LpExtend(.., LpDeref(mc::BorrowedPtr(..))) => {} // FIXME (pnkfelix): LV[j] should be tracked, at least in the // sense of we will track the remaining drop obligation of the @@ -379,7 +378,7 @@ fn add_fragment_siblings<'a, 'tcx>(this: &MoveData<'tcx>, // bind. // // Anyway, for now: LV[j] is not tracked precisely - LpExtend(_, _, LpInterior(_, InteriorElement(..))) => { + LpExtend(.., LpInterior(_, InteriorElement(..))) => { let mp = this.move_path(tcx, lp.clone()); gathered_fragments.push(AllButOneFrom(mp)); } @@ -423,8 +422,8 @@ fn add_fragment_siblings_for_extension<'a, 'tcx>(this: &MoveData<'tcx>, variant_did); }; - match (&parent_ty.sty, enum_variant_info) { - (&ty::TyTuple(ref v), None) => { + match parent_ty.sty { + ty::TyTuple(ref v) => { let tuple_idx = match *origin_field_name { mc::PositionalField(tuple_idx) => tuple_idx, mc::NamedField(_) => @@ -439,65 +438,68 @@ fn add_fragment_siblings_for_extension<'a, 'tcx>(this: &MoveData<'tcx>, } } - (&ty::TyStruct(def, _), None) => { - match *origin_field_name { - mc::NamedField(ast_name) => { - for f in &def.struct_variant().fields { - if f.name == ast_name { - continue; + ty::TyAdt(def, ..) => match def.adt_kind() { + AdtKind::Struct => { + match *origin_field_name { + mc::NamedField(ast_name) => { + for f in &def.struct_variant().fields { + if f.name == ast_name { + continue; + } + let field_name = mc::NamedField(f.name); + add_fragment_sibling_local(field_name, None); } - let field_name = mc::NamedField(f.name); - add_fragment_sibling_local(field_name, None); } - } - mc::PositionalField(tuple_idx) => { - for (i, _f) in def.struct_variant().fields.iter().enumerate() { - if i == tuple_idx { - continue + mc::PositionalField(tuple_idx) => { + for (i, _f) in def.struct_variant().fields.iter().enumerate() { + if i == tuple_idx { + continue + } + let field_name = mc::PositionalField(i); + add_fragment_sibling_local(field_name, None); } - let field_name = mc::PositionalField(i); - add_fragment_sibling_local(field_name, None); } } } - } - - (&ty::TyEnum(def, _), ref enum_variant_info) => { - let variant = match *enum_variant_info { - Some((vid, ref _lp2)) => def.variant_with_id(vid), - None => { - assert!(def.is_univariant()); - &def.variants[0] - } - }; - match *origin_field_name { - mc::NamedField(ast_name) => { - for field in &variant.fields { - if field.name == ast_name { - continue; + AdtKind::Union => { + // Do nothing, all union fields are moved/assigned together. 
+ } + AdtKind::Enum => { + let variant = match enum_variant_info { + Some((vid, ref _lp2)) => def.variant_with_id(vid), + None => { + assert!(def.is_univariant()); + &def.variants[0] + } + }; + match *origin_field_name { + mc::NamedField(ast_name) => { + for field in &variant.fields { + if field.name == ast_name { + continue; + } + let field_name = mc::NamedField(field.name); + add_fragment_sibling_local(field_name, Some(variant.did)); } - let field_name = mc::NamedField(field.name); - add_fragment_sibling_local(field_name, Some(variant.did)); } - } - mc::PositionalField(tuple_idx) => { - for (i, _f) in variant.fields.iter().enumerate() { - if tuple_idx == i { - continue; + mc::PositionalField(tuple_idx) => { + for (i, _f) in variant.fields.iter().enumerate() { + if tuple_idx == i { + continue; + } + let field_name = mc::PositionalField(i); + add_fragment_sibling_local(field_name, None); } - let field_name = mc::PositionalField(i); - add_fragment_sibling_local(field_name, None); } } } - } + }, - ref sty_and_variant_info => { + ref ty => { let opt_span = origin_id.and_then(|id|tcx.map.opt_span(id)); span_bug!(opt_span.unwrap_or(DUMMY_SP), "type {:?} ({:?}) is not fragmentable", - parent_ty, - sty_and_variant_info); + parent_ty, ty); } } } diff --git a/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs b/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs index 9431dcdbca..9bdc6887f6 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs @@ -161,9 +161,9 @@ fn check_and_get_illegal_move_origin<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, cmt: &mc::cmt<'tcx>) -> Option> { match cmt.cat { - Categorization::Deref(_, _, mc::BorrowedPtr(..)) | - Categorization::Deref(_, _, mc::Implicit(..)) | - Categorization::Deref(_, _, mc::UnsafePtr(..)) | + Categorization::Deref(.., mc::BorrowedPtr(..)) | + Categorization::Deref(.., mc::Implicit(..)) | + Categorization::Deref(.., mc::UnsafePtr(..)) | Categorization::StaticItem => { Some(cmt.clone()) } @@ -178,13 +178,14 @@ fn check_and_get_illegal_move_origin<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, Categorization::Interior(ref b, mc::InteriorField(_)) | Categorization::Interior(ref b, mc::InteriorElement(Kind::Pattern, _)) => { match b.ty.sty { - ty::TyStruct(def, _) | ty::TyEnum(def, _) => { + ty::TyAdt(def, _) => { if def.has_dtor() { Some(cmt.clone()) } else { check_and_get_illegal_move_origin(bccx, b) } } + ty::TySlice(..) => Some(cmt.clone()), _ => { check_and_get_illegal_move_origin(bccx, b) } diff --git a/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs b/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs index e34c6e567b..5970d6e4f2 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs @@ -28,7 +28,7 @@ pub fn guarantee_lifetime<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, span: Span, cause: euv::LoanCause, cmt: mc::cmt<'tcx>, - loan_region: ty::Region, + loan_region: &'tcx ty::Region, _: ty::BorrowKind) -> Result<(),()> { //! Reports error if `loan_region` is larger than S @@ -56,7 +56,7 @@ struct GuaranteeLifetimeContext<'a, 'tcx: 'a> { span: Span, cause: euv::LoanCause, - loan_region: ty::Region, + loan_region: &'tcx ty::Region, cmt_original: mc::cmt<'tcx> } @@ -74,9 +74,9 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { Categorization::Rvalue(..) | Categorization::Local(..) | // L-Local Categorization::Upvar(..) 
| - Categorization::Deref(_, _, mc::BorrowedPtr(..)) | // L-Deref-Borrowed - Categorization::Deref(_, _, mc::Implicit(..)) | - Categorization::Deref(_, _, mc::UnsafePtr(..)) => { + Categorization::Deref(.., mc::BorrowedPtr(..)) | // L-Deref-Borrowed + Categorization::Deref(.., mc::Implicit(..)) | + Categorization::Deref(.., mc::UnsafePtr(..)) => { self.check_scope(self.scope(cmt)) } @@ -92,17 +92,17 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { } } - fn check_scope(&self, max_scope: ty::Region) -> R { + fn check_scope(&self, max_scope: &'tcx ty::Region) -> R { //! Reports an error if `loan_region` is larger than `max_scope` if !self.bccx.is_subregion_of(self.loan_region, max_scope) { - Err(self.report_error(err_out_of_scope(max_scope, self.loan_region))) + Err(self.report_error(err_out_of_scope(max_scope, self.loan_region, self.cause))) } else { Ok(()) } } - fn scope(&self, cmt: &mc::cmt) -> ty::Region { + fn scope(&self, cmt: &mc::cmt<'tcx>) -> &'tcx ty::Region { //! Returns the maximal region scope for the which the //! lvalue `cmt` is guaranteed to be valid without any //! rooting etc, and presuming `cmt` is not mutated. @@ -112,19 +112,18 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { temp_scope } Categorization::Upvar(..) => { - ty::ReScope(self.item_scope) - } - Categorization::StaticItem => { - ty::ReStatic + self.bccx.tcx.mk_region(ty::ReScope(self.item_scope)) } Categorization::Local(local_id) => { - ty::ReScope(self.bccx.tcx.region_maps.var_scope(local_id)) + self.bccx.tcx.mk_region(ty::ReScope( + self.bccx.tcx.region_maps.var_scope(local_id))) } - Categorization::Deref(_, _, mc::UnsafePtr(..)) => { - ty::ReStatic + Categorization::StaticItem | + Categorization::Deref(.., mc::UnsafePtr(..)) => { + self.bccx.tcx.mk_region(ty::ReStatic) } - Categorization::Deref(_, _, mc::BorrowedPtr(_, r)) | - Categorization::Deref(_, _, mc::Implicit(_, r)) => { + Categorization::Deref(.., mc::BorrowedPtr(_, r)) | + Categorization::Deref(.., mc::Implicit(_, r)) => { r } Categorization::Downcast(ref cmt, _) | @@ -135,7 +134,7 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { } } - fn report_error(&self, code: bckerr_code) { + fn report_error(&self, code: bckerr_code<'tcx>) { self.bccx.report(BckError { cmt: self.cmt_original.clone(), span: self.span, cause: BorrowViolation(self.cause), diff --git a/src/librustc_borrowck/borrowck/gather_loans/mod.rs b/src/librustc_borrowck/borrowck/gather_loans/mod.rs index c982fc091d..763c012a8f 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/mod.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/mod.rs @@ -130,7 +130,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { borrow_id: ast::NodeId, borrow_span: Span, cmt: mc::cmt<'tcx>, - loan_region: ty::Region, + loan_region: &'tcx ty::Region, bk: ty::BorrowKind, loan_cause: euv::LoanCause) { @@ -205,7 +205,7 @@ fn check_aliasability<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, alias_cause); Err(()) } - (_, _) => { + (..) 
=> { Ok(()) } } @@ -307,7 +307,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { borrow_span: Span, cmt: mc::cmt<'tcx>, req_kind: ty::BorrowKind, - loan_region: ty::Region, + loan_region: &'tcx ty::Region, cause: euv::LoanCause) { debug!("guarantee_valid(borrow_id={}, cmt={:?}, \ req_mutbl={:?}, loan_region={:?})", @@ -318,7 +318,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { // a loan for the empty region can never be dereferenced, so // it is always safe - if loan_region == ty::ReEmpty { + if *loan_region == ty::ReEmpty { return; } @@ -358,7 +358,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { } RestrictionResult::SafeIf(loan_path, restricted_paths) => { - let loan_scope = match loan_region { + let loan_scope = match *loan_region { ty::ReScope(scope) => scope, ty::ReFree(ref fr) => fr.scope, diff --git a/src/librustc_borrowck/borrowck/gather_loans/move_error.rs b/src/librustc_borrowck/borrowck/gather_loans/move_error.rs index fc17633d63..9fbf1492f5 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/move_error.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/move_error.rs @@ -16,7 +16,6 @@ use rustc::ty; use syntax::ast; use syntax_pos; use errors::DiagnosticBuilder; -use rustc::hir; pub struct MoveErrorCollector<'tcx> { errors: Vec> @@ -117,9 +116,9 @@ fn report_cannot_move_out_of<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, move_from: mc::cmt<'tcx>) -> DiagnosticBuilder<'a> { match move_from.cat { - Categorization::Deref(_, _, mc::BorrowedPtr(..)) | - Categorization::Deref(_, _, mc::Implicit(..)) | - Categorization::Deref(_, _, mc::UnsafePtr(..)) | + Categorization::Deref(.., mc::BorrowedPtr(..)) | + Categorization::Deref(.., mc::Implicit(..)) | + Categorization::Deref(.., mc::UnsafePtr(..)) | Categorization::StaticItem => { let mut err = struct_span_err!(bccx, move_from.span, E0507, "cannot move out of {}", @@ -131,25 +130,27 @@ fn report_cannot_move_out_of<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, err } - Categorization::Interior(ref b, mc::InteriorElement(Kind::Index, _)) => { - let expr = bccx.tcx.map.expect_expr(move_from.id); - if let hir::ExprIndex(..) 
= expr.node { - let mut err = struct_span_err!(bccx, move_from.span, E0508, - "cannot move out of type `{}`, \ - a non-copy fixed-size array", - b.ty); - err.span_label(move_from.span, &format!("cannot move out of here")); - err - } else { - span_bug!(move_from.span, "this path should not cause illegal move"); + Categorization::Interior(ref b, mc::InteriorElement(ik, _)) => { + match (&b.ty.sty, ik) { + (&ty::TySlice(..), _) | + (_, Kind::Index) => { + let mut err = struct_span_err!(bccx, move_from.span, E0508, + "cannot move out of type `{}`, \ + a non-copy array", + b.ty); + err.span_label(move_from.span, &format!("cannot move out of here")); + err + } + (_, Kind::Pattern) => { + span_bug!(move_from.span, "this path should not cause illegal move"); + } } } Categorization::Downcast(ref b, _) | Categorization::Interior(ref b, mc::InteriorField(_)) => { match b.ty.sty { - ty::TyStruct(def, _) | - ty::TyEnum(def, _) if def.has_dtor() => { + ty::TyAdt(def, _) if def.has_dtor() => { let mut err = struct_span_err!(bccx, move_from.span, E0509, "cannot move out of type `{}`, \ which implements the `Drop` trait", diff --git a/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs b/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs index 3d9df4c8bd..fdcefdc0d4 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs @@ -31,7 +31,7 @@ pub fn compute_restrictions<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, span: Span, cause: euv::LoanCause, cmt: mc::cmt<'tcx>, - loan_region: ty::Region) + loan_region: &'tcx ty::Region) -> RestrictionResult<'tcx> { let ctxt = RestrictionsContext { bccx: bccx, @@ -49,7 +49,7 @@ pub fn compute_restrictions<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, struct RestrictionsContext<'a, 'tcx: 'a> { bccx: &'a BorrowckCtxt<'a, 'tcx>, span: Span, - loan_region: ty::Region, + loan_region: &'tcx ty::Region, cause: euv::LoanCause, } @@ -89,7 +89,7 @@ impl<'a, 'tcx> RestrictionsContext<'a, 'tcx> { self.restrict(cmt_base) } - Categorization::Interior(cmt_base, i) => { + Categorization::Interior(cmt_base, interior) => { // R-Field // // Overwriting the base would not change the type of @@ -99,8 +99,34 @@ impl<'a, 'tcx> RestrictionsContext<'a, 'tcx> { Categorization::Downcast(_, variant_id) => Some(variant_id), _ => None }; + let interior = interior.cleaned(); + let base_ty = cmt_base.ty; let result = self.restrict(cmt_base); - self.extend(result, &cmt, LpInterior(opt_variant_id, i.cleaned())) + // Borrowing one union field automatically borrows all its fields. 
+ match base_ty.sty { + ty::TyAdt(adt_def, _) if adt_def.is_union() => match result { + RestrictionResult::Safe => RestrictionResult::Safe, + RestrictionResult::SafeIf(base_lp, mut base_vec) => { + for field in &adt_def.struct_variant().fields { + let field = InteriorKind::InteriorField(mc::NamedField(field.name)); + let field_ty = if field == interior { + cmt.ty + } else { + self.bccx.tcx.types.err // Doesn't matter + }; + let sibling_lp_kind = LpExtend(base_lp.clone(), cmt.mutbl, + LpInterior(opt_variant_id, field)); + let sibling_lp = Rc::new(LoanPath::new(sibling_lp_kind, field_ty)); + base_vec.push(sibling_lp); + } + + let lp = new_lp(LpExtend(base_lp, cmt.mutbl, + LpInterior(opt_variant_id, interior))); + RestrictionResult::SafeIf(lp, base_vec) + } + }, + _ => self.extend(result, &cmt, LpInterior(opt_variant_id, interior)) + } } Categorization::StaticItem => { @@ -157,7 +183,7 @@ impl<'a, 'tcx> RestrictionsContext<'a, 'tcx> { fn extend(&self, result: RestrictionResult<'tcx>, cmt: &mc::cmt<'tcx>, - elem: LoanPathElem) -> RestrictionResult<'tcx> { + elem: LoanPathElem<'tcx>) -> RestrictionResult<'tcx> { match result { RestrictionResult::Safe => RestrictionResult::Safe, RestrictionResult::SafeIf(base_lp, mut base_vec) => { diff --git a/src/librustc_borrowck/borrowck/mir/dataflow/impls.rs b/src/librustc_borrowck/borrowck/mir/dataflow/impls.rs index 90858e4e8b..55dda8eda3 100644 --- a/src/librustc_borrowck/borrowck/mir/dataflow/impls.rs +++ b/src/librustc_borrowck/borrowck/mir/dataflow/impls.rs @@ -9,16 +9,15 @@ // except according to those terms. use rustc::ty::TyCtxt; -use rustc::mir::repr::{self, Mir}; +use rustc::mir::repr::{self, Mir, Location}; use rustc_data_structures::indexed_vec::Idx; -use super::super::gather_moves::{Location}; use super::super::gather_moves::{MoveOutIndex, MovePathIndex}; use super::super::MoveDataParamEnv; use super::super::DropFlagState; use super::super::drop_flag_effects_for_function_entry; use super::super::drop_flag_effects_for_location; -use super::super::on_all_children_bits; +use super::super::on_lookup_result_bits; use super::{BitDenotation, BlockSets, DataflowOperator}; @@ -252,7 +251,7 @@ impl<'a, 'tcx> BitDenotation for MaybeInitializedLvals<'a, 'tcx> { { drop_flag_effects_for_location( self.tcx, self.mir, ctxt, - Location { block: bb, index: idx }, + Location { block: bb, statement_index: idx }, |path, s| Self::update_bits(sets, path, s) ) } @@ -265,7 +264,7 @@ impl<'a, 'tcx> BitDenotation for MaybeInitializedLvals<'a, 'tcx> { { drop_flag_effects_for_location( self.tcx, self.mir, ctxt, - Location { block: bb, index: statements_len }, + Location { block: bb, statement_index: statements_len }, |path, s| Self::update_bits(sets, path, s) ) } @@ -278,10 +277,9 @@ impl<'a, 'tcx> BitDenotation for MaybeInitializedLvals<'a, 'tcx> { dest_lval: &repr::Lvalue) { // when a call returns successfully, that means we need to set // the bits for that dest_lval to 1 (initialized). 
- let move_path_index = ctxt.move_data.rev_lookup.find(dest_lval); - on_all_children_bits(self.tcx, self.mir, &ctxt.move_data, - move_path_index, - |mpi| { in_out.add(&mpi); }); + on_lookup_result_bits(self.tcx, self.mir, &ctxt.move_data, + ctxt.move_data.rev_lookup.find(dest_lval), + |mpi| { in_out.add(&mpi); }); } } @@ -314,7 +312,7 @@ impl<'a, 'tcx> BitDenotation for MaybeUninitializedLvals<'a, 'tcx> { { drop_flag_effects_for_location( self.tcx, self.mir, ctxt, - Location { block: bb, index: idx }, + Location { block: bb, statement_index: idx }, |path, s| Self::update_bits(sets, path, s) ) } @@ -327,7 +325,7 @@ impl<'a, 'tcx> BitDenotation for MaybeUninitializedLvals<'a, 'tcx> { { drop_flag_effects_for_location( self.tcx, self.mir, ctxt, - Location { block: bb, index: statements_len }, + Location { block: bb, statement_index: statements_len }, |path, s| Self::update_bits(sets, path, s) ) } @@ -339,11 +337,10 @@ impl<'a, 'tcx> BitDenotation for MaybeUninitializedLvals<'a, 'tcx> { _dest_bb: repr::BasicBlock, dest_lval: &repr::Lvalue) { // when a call returns successfully, that means we need to set - // the bits for that dest_lval to 1 (initialized). - let move_path_index = ctxt.move_data.rev_lookup.find(dest_lval); - on_all_children_bits(self.tcx, self.mir, &ctxt.move_data, - move_path_index, - |mpi| { in_out.remove(&mpi); }); + // the bits for that dest_lval to 0 (initialized). + on_lookup_result_bits(self.tcx, self.mir, &ctxt.move_data, + ctxt.move_data.rev_lookup.find(dest_lval), + |mpi| { in_out.remove(&mpi); }); } } @@ -375,7 +372,7 @@ impl<'a, 'tcx> BitDenotation for DefinitelyInitializedLvals<'a, 'tcx> { { drop_flag_effects_for_location( self.tcx, self.mir, ctxt, - Location { block: bb, index: idx }, + Location { block: bb, statement_index: idx }, |path, s| Self::update_bits(sets, path, s) ) } @@ -388,7 +385,7 @@ impl<'a, 'tcx> BitDenotation for DefinitelyInitializedLvals<'a, 'tcx> { { drop_flag_effects_for_location( self.tcx, self.mir, ctxt, - Location { block: bb, index: statements_len }, + Location { block: bb, statement_index: statements_len }, |path, s| Self::update_bits(sets, path, s) ) } @@ -401,10 +398,9 @@ impl<'a, 'tcx> BitDenotation for DefinitelyInitializedLvals<'a, 'tcx> { dest_lval: &repr::Lvalue) { // when a call returns successfully, that means we need to set // the bits for that dest_lval to 1 (initialized). - let move_path_index = ctxt.move_data.rev_lookup.find(dest_lval); - on_all_children_bits(self.tcx, self.mir, &ctxt.move_data, - move_path_index, - |mpi| { in_out.add(&mpi); }); + on_lookup_result_bits(self.tcx, self.mir, &ctxt.move_data, + ctxt.move_data.rev_lookup.find(dest_lval), + |mpi| { in_out.add(&mpi); }); } } @@ -431,7 +427,7 @@ impl<'a, 'tcx> BitDenotation for MovingOutStatements<'a, 'tcx> { let path_map = &move_data.path_map; let rev_lookup = &move_data.rev_lookup; - let loc = Location { block: bb, index: idx }; + let loc = Location { block: bb, statement_index: idx }; debug!("stmt {:?} at loc {:?} moves out of move_indexes {:?}", stmt, loc, &loc_map[loc]); for move_index in &loc_map[loc] { @@ -449,18 +445,18 @@ impl<'a, 'tcx> BitDenotation for MovingOutStatements<'a, 'tcx> { // assigning into this `lvalue` kills all // MoveOuts from it, and *also* all MoveOuts // for children and associated fragment sets. 
- let move_path_index = rev_lookup.find(lvalue); - on_all_children_bits(tcx, + on_lookup_result_bits(tcx, mir, move_data, - move_path_index, + rev_lookup.find(lvalue), |mpi| for moi in &path_map[mpi] { assert!(moi.index() < bits_per_block); sets.kill_set.add(&moi); }); } repr::StatementKind::StorageLive(_) | - repr::StatementKind::StorageDead(_) => {} + repr::StatementKind::StorageDead(_) | + repr::StatementKind::Nop => {} } } @@ -473,7 +469,7 @@ impl<'a, 'tcx> BitDenotation for MovingOutStatements<'a, 'tcx> { let (mir, move_data) = (self.mir, &ctxt.move_data); let term = mir[bb].terminator(); let loc_map = &move_data.loc_map; - let loc = Location { block: bb, index: statements_len }; + let loc = Location { block: bb, statement_index: statements_len }; debug!("terminator {:?} at loc {:?} moves out of move_indexes {:?}", term, loc, &loc_map[loc]); let bits_per_block = self.bits_per_block(ctxt); @@ -490,18 +486,17 @@ impl<'a, 'tcx> BitDenotation for MovingOutStatements<'a, 'tcx> { _dest_bb: repr::BasicBlock, dest_lval: &repr::Lvalue) { let move_data = &ctxt.move_data; - let move_path_index = move_data.rev_lookup.find(dest_lval); let bits_per_block = self.bits_per_block(ctxt); let path_map = &move_data.path_map; - on_all_children_bits(self.tcx, - self.mir, - move_data, - move_path_index, - |mpi| for moi in &path_map[mpi] { - assert!(moi.index() < bits_per_block); - in_out.remove(&moi); - }); + on_lookup_result_bits(self.tcx, + self.mir, + move_data, + move_data.rev_lookup.find(dest_lval), + |mpi| for moi in &path_map[mpi] { + assert!(moi.index() < bits_per_block); + in_out.remove(&moi); + }); } } diff --git a/src/librustc_borrowck/borrowck/mir/dataflow/sanity_check.rs b/src/librustc_borrowck/borrowck/mir/dataflow/sanity_check.rs index 9a4865755e..aeb91f06a9 100644 --- a/src/librustc_borrowck/borrowck/mir/dataflow/sanity_check.rs +++ b/src/librustc_borrowck/borrowck/mir/dataflow/sanity_check.rs @@ -16,7 +16,7 @@ use rustc::ty::{self, TyCtxt}; use rustc::mir::repr::{self, Mir}; use rustc_data_structures::indexed_vec::Idx; -use super::super::gather_moves::{MovePathIndex}; +use super::super::gather_moves::{MovePathIndex, LookupResult}; use super::super::MoveDataParamEnv; use super::BitDenotation; use super::DataflowResults; @@ -105,7 +105,8 @@ fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, (lvalue, rvalue) } repr::StatementKind::StorageLive(_) | - repr::StatementKind::StorageDead(_) => continue, + repr::StatementKind::StorageDead(_) | + repr::StatementKind::Nop => continue, repr::StatementKind::SetDiscriminant{ .. } => span_bug!(stmt.source_info.span, "sanity_check should run before Deaggregator inserts SetDiscriminant"), @@ -116,20 +117,26 @@ fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, repr::BorrowKind::Shared, ref peeking_at_lval) = *rvalue { // Okay, our search is over. - let peek_mpi = move_data.rev_lookup.find(peeking_at_lval); - let bit_state = sets.on_entry.contains(&peek_mpi); - debug!("rustc_peek({:?} = &{:?}) bit_state: {}", - lvalue, peeking_at_lval, bit_state); - if !bit_state { - tcx.sess.span_err(span, &format!("rustc_peek: bit not set")); + match move_data.rev_lookup.find(peeking_at_lval) { + LookupResult::Exact(peek_mpi) => { + let bit_state = sets.on_entry.contains(&peek_mpi); + debug!("rustc_peek({:?} = &{:?}) bit_state: {}", + lvalue, peeking_at_lval, bit_state); + if !bit_state { + tcx.sess.span_err(span, "rustc_peek: bit not set"); + } + } + LookupResult::Parent(..) 
=> { + tcx.sess.span_err(span, "rustc_peek: argument untracked"); + } } return; } else { // Our search should have been over, but the input // does not match expectations of `rustc_peek` for // this sanity_check. - let msg = &format!("rustc_peek: argument expression \ - must be immediate borrow of form `&expr`"); + let msg = "rustc_peek: argument expression \ + must be immediate borrow of form `&expr`"; tcx.sess.span_err(span, msg); } } diff --git a/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs b/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs index f6e9484eda..96702b209a 100644 --- a/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs +++ b/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs @@ -9,14 +9,15 @@ // except according to those terms. use indexed_set::IdxSetBuf; -use super::gather_moves::{MoveData, MovePathIndex, MovePathContent, Location}; +use super::gather_moves::{MoveData, MovePathIndex, LookupResult}; use super::dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals}; use super::dataflow::{DataflowResults}; use super::{drop_flag_effects_for_location, on_all_children_bits}; +use super::on_lookup_result_bits; use super::{DropFlagState, MoveDataParamEnv}; use super::patch::MirPatch; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::subst::{Subst, Substs, VecPerParamSpace}; +use rustc::ty::subst::{Kind, Subst, Substs}; use rustc::mir::repr::*; use rustc::mir::transform::{Pass, MirPass, MirSource}; use rustc::middle::const_val::ConstVal; @@ -26,6 +27,7 @@ use rustc_data_structures::indexed_vec::Idx; use syntax_pos::Span; use std::fmt; +use std::iter; use std::u32; pub struct ElaborateDrops; @@ -41,7 +43,7 @@ impl<'tcx> MirPass<'tcx> for ElaborateDrops { } let id = src.item_id(); let param_env = ty::ParameterEnvironment::for_item(tcx, id); - let move_data = MoveData::gather_moves(mir, tcx); + let move_data = MoveData::gather_moves(mir, tcx, ¶m_env); let elaborate_patch = { let mir = &*mir; let env = MoveDataParamEnv { @@ -146,9 +148,9 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { dead: self.flow_uninits.sets().on_entry_set_for(loc.block.index()) .to_owned(), }; - for stmt in 0..loc.index { + for stmt in 0..loc.statement_index { data.apply_location(self.tcx, self.mir, self.env, - Location { block: loc.block, index: stmt }); + Location { block: loc.block, statement_index: stmt }); } data } @@ -183,31 +185,11 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { fn path_needs_drop(&self, path: MovePathIndex) -> bool { - match self.move_data().move_paths[path].content { - MovePathContent::Lvalue(ref lvalue) => { - let ty = lvalue.ty(self.mir, self.tcx).to_ty(self.tcx); - debug!("path_needs_drop({:?}, {:?} : {:?})", path, lvalue, ty); + let lvalue = &self.move_data().move_paths[path].lvalue; + let ty = lvalue.ty(self.mir, self.tcx).to_ty(self.tcx); + debug!("path_needs_drop({:?}, {:?} : {:?})", path, lvalue, ty); - self.tcx.type_needs_drop_given_env(ty, self.param_env()) - } - _ => false - } - } - - /// Returns whether this lvalue is tracked by drop elaboration. This - /// includes all lvalues, except these (1.) behind references or arrays, - /// or (2.) behind ADT's with a Drop impl. - fn lvalue_is_tracked(&self, lv: &Lvalue<'tcx>) -> bool - { - // `lvalue_contents_drop_state_cannot_differ` only compares - // the `lv` to its immediate contents, while this recursively - // follows parent chain formed by `base` of each projection. 
- if let &Lvalue::Projection(ref data) = lv { - !super::lvalue_contents_drop_state_cannot_differ(self.tcx, self.mir, &data.base) && - self.lvalue_is_tracked(&data.base) - } else { - true - } + self.tcx.type_needs_drop_given_env(ty, self.param_env()) } fn collect_drop_flags(&mut self) @@ -220,19 +202,29 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { _ => continue }; - if !self.lvalue_is_tracked(location) { - continue - } - let init_data = self.initialization_data_at(Location { block: bb, - index: data.statements.len() + statement_index: data.statements.len() }); let path = self.move_data().rev_lookup.find(location); - debug!("collect_drop_flags: {:?}, lv {:?} (index {:?})", + debug!("collect_drop_flags: {:?}, lv {:?} ({:?})", bb, location, path); + let path = match path { + LookupResult::Exact(e) => e, + LookupResult::Parent(None) => continue, + LookupResult::Parent(Some(parent)) => { + let (_maybe_live, maybe_dead) = init_data.state(parent); + if maybe_dead { + span_bug!(terminator.source_info.span, + "drop of untracked, uninitialized value {:?}, lv {:?} ({:?})", + bb, location, path); + } + continue + } + }; + on_all_children_bits(self.tcx, self.mir, self.move_data(), path, |child| { if self.path_needs_drop(child) { let (maybe_live, maybe_dead) = init_data.state(child); @@ -249,27 +241,34 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { fn elaborate_drops(&mut self) { for (bb, data) in self.mir.basic_blocks().iter_enumerated() { - let loc = Location { block: bb, index: data.statements.len() }; + let loc = Location { block: bb, statement_index: data.statements.len() }; let terminator = data.terminator(); let resume_block = self.patch.resume_block(); match terminator.kind { TerminatorKind::Drop { ref location, target, unwind } => { let init_data = self.initialization_data_at(loc); - let path = self.move_data().rev_lookup.find(location); - self.elaborate_drop(&DropCtxt { - source_info: terminator.source_info, - is_cleanup: data.is_cleanup, - init_data: &init_data, - lvalue: location, - path: path, - succ: target, - unwind: if data.is_cleanup { - None - } else { - Some(Option::unwrap_or(unwind, resume_block)) + match self.move_data().rev_lookup.find(location) { + LookupResult::Exact(path) => { + self.elaborate_drop(&DropCtxt { + source_info: terminator.source_info, + is_cleanup: data.is_cleanup, + init_data: &init_data, + lvalue: location, + path: path, + succ: target, + unwind: if data.is_cleanup { + None + } else { + Some(Option::unwrap_or(unwind, resume_block)) + } + }, bb); } - }, bb); + LookupResult::Parent(..) => { + span_bug!(terminator.source_info.span, + "drop of untracked value {:?}", bb); + } + } } TerminatorKind::DropAndReplace { ref location, ref value, target, unwind } => @@ -335,35 +334,37 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { is_cleanup: data.is_cleanup, }); - if !self.lvalue_is_tracked(location) { - // drop and replace behind a pointer/array/whatever. The location - // must be initialized. 
- debug!("elaborate_drop_and_replace({:?}) - untracked", terminator); - self.patch.patch_terminator(bb, TerminatorKind::Drop { - location: location.clone(), - target: target, - unwind: Some(unwind) - }); - } else { - debug!("elaborate_drop_and_replace({:?}) - tracked", terminator); - let init_data = self.initialization_data_at(loc); - let path = self.move_data().rev_lookup.find(location); - - self.elaborate_drop(&DropCtxt { - source_info: terminator.source_info, - is_cleanup: data.is_cleanup, - init_data: &init_data, - lvalue: location, - path: path, - succ: target, - unwind: Some(unwind) - }, bb); - on_all_children_bits(self.tcx, self.mir, self.move_data(), path, |child| { - self.set_drop_flag(Location { block: target, index: 0 }, - child, DropFlagState::Present); - self.set_drop_flag(Location { block: unwind, index: 0 }, - child, DropFlagState::Present); - }); + match self.move_data().rev_lookup.find(location) { + LookupResult::Exact(path) => { + debug!("elaborate_drop_and_replace({:?}) - tracked {:?}", terminator, path); + let init_data = self.initialization_data_at(loc); + + self.elaborate_drop(&DropCtxt { + source_info: terminator.source_info, + is_cleanup: data.is_cleanup, + init_data: &init_data, + lvalue: location, + path: path, + succ: target, + unwind: Some(unwind) + }, bb); + on_all_children_bits(self.tcx, self.mir, self.move_data(), path, |child| { + self.set_drop_flag(Location { block: target, statement_index: 0 }, + child, DropFlagState::Present); + self.set_drop_flag(Location { block: unwind, statement_index: 0 }, + child, DropFlagState::Present); + }); + } + LookupResult::Parent(parent) => { + // drop and replace behind a pointer/array/whatever. The location + // must be initialized. + debug!("elaborate_drop_and_replace({:?}) - untracked {:?}", terminator, parent); + self.patch.patch_terminator(bb, TerminatorKind::Drop { + location: location.clone(), + target: target, + unwind: Some(unwind) + }); + } } } @@ -445,10 +446,9 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { substs: &'tcx Substs<'tcx>) -> Vec<(Lvalue<'tcx>, Option)> { - let move_paths = &self.move_data().move_paths; variant.fields.iter().enumerate().map(|(i, f)| { let subpath = - super::move_path_children_matching(move_paths, variant_path, |p| { + super::move_path_children_matching(self.move_data(), variant_path, |p| { match p { &Projection { elem: ProjectionElem::Field(idx, _), .. @@ -579,7 +579,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { let fields = tys.iter().enumerate().map(|(i, &ty)| { (c.lvalue.clone().field(Field::new(i), ty), super::move_path_children_matching( - &self.move_data().move_paths, c.path, |proj| match proj { + self.move_data(), c.path, |proj| match proj { &Projection { elem: ProjectionElem::Field(f, _), .. } => f.index() == i, @@ -597,7 +597,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { debug!("open_drop_for_box({:?}, {:?})", c, ty); let interior_path = super::move_path_children_matching( - &self.move_data().move_paths, c.path, |proj| match proj { + self.move_data(), c.path, |proj| match proj { &Projection { elem: ProjectionElem::Deref, .. } => true, _ => false }).unwrap(); @@ -624,10 +624,8 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { variant_index: usize) -> BasicBlock { - let move_paths = &self.move_data().move_paths; - let subpath = super::move_path_children_matching( - move_paths, c.path, |proj| match proj { + self.move_data(), c.path, |proj| match proj { &Projection { elem: ProjectionElem::Downcast(_, idx), .. 
} => idx == variant_index, @@ -708,7 +706,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { fn open_drop<'a>(&mut self, c: &DropCtxt<'a, 'tcx>) -> BasicBlock { let ty = c.lvalue.ty(self.mir, self.tcx).to_ty(self.tcx); match ty.sty { - ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => { + ty::TyAdt(def, substs) => { self.open_drop_for_adt(c, def, substs) } ty::TyTuple(tys) | ty::TyClosure(_, ty::ClosureSubsts { @@ -741,7 +739,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { let drop_block = self.drop_block(c); if update_drop_flag { self.set_drop_flag( - Location { block: drop_block, index: 0 }, + Location { block: drop_block, statement_index: 0 }, c.path, DropFlagState::Absent ); @@ -859,10 +857,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { let unit_temp = Lvalue::Temp(self.patch.new_temp(tcx.mk_nil())); let free_func = tcx.lang_items.require(lang_items::BoxFreeFnLangItem) .unwrap_or_else(|e| tcx.sess.fatal(&e)); - let substs = tcx.mk_substs(Substs::new( - VecPerParamSpace::new(vec![], vec![], vec![ty]), - VecPerParamSpace::new(vec![], vec![], vec![]) - )); + let substs = Substs::new(tcx, iter::once(Kind::from(ty))); let fty = tcx.lookup_item_type(free_func).ty.subst(tcx, substs); self.patch.new_block(BasicBlockData { @@ -895,7 +890,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { let ty = c.lvalue.ty(self.mir, self.tcx).to_ty(self.tcx); match ty.sty { - ty::TyStruct(def, _) | ty::TyEnum(def, _) => { + ty::TyAdt(def, _) => { if def.has_dtor() { self.tcx.sess.span_warn( c.source_info.span, @@ -927,7 +922,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { } fn drop_flags_on_init(&mut self) { - let loc = Location { block: START_BLOCK, index: 0 }; + let loc = Location { block: START_BLOCK, statement_index: 0 }; let span = self.patch.source_info_for_location(self.mir, loc).span; let false_ = self.constant_bool(span, false); for flag in self.drop_flags.values() { @@ -942,9 +937,9 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { } = data.terminator().kind { assert!(!self.patch.is_patched(bb)); - let loc = Location { block: tgt, index: 0 }; + let loc = Location { block: tgt, statement_index: 0 }; let path = self.move_data().rev_lookup.find(lv); - on_all_children_bits( + on_lookup_result_bits( self.tcx, self.mir, self.move_data(), path, |child| self.set_drop_flag(loc, child, DropFlagState::Present) ); @@ -953,7 +948,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { } fn drop_flags_for_args(&mut self) { - let loc = Location { block: START_BLOCK, index: 0 }; + let loc = Location { block: START_BLOCK, statement_index: 0 }; super::drop_flag_effects_for_function_entry( self.tcx, self.mir, self.env, |path, ds| { self.set_drop_flag(loc, path, ds); @@ -993,7 +988,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { } } } - let loc = Location { block: bb, index: i }; + let loc = Location { block: bb, statement_index: i }; super::drop_flag_effects_for_location( self.tcx, self.mir, self.env, loc, |path, ds| { if ds == DropFlagState::Absent || allow_initializations { @@ -1011,9 +1006,9 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { } = data.terminator().kind { assert!(!self.patch.is_patched(bb)); - let loc = Location { block: bb, index: data.statements.len() }; + let loc = Location { block: bb, statement_index: data.statements.len() }; let path = self.move_data().rev_lookup.find(lv); - on_all_children_bits( + on_lookup_result_bits( self.tcx, self.mir, self.move_data(), path, |child| self.set_drop_flag(loc, child, DropFlagState::Present) ); diff --git 
a/src/librustc_borrowck/borrowck/mir/gather_moves.rs b/src/librustc_borrowck/borrowck/mir/gather_moves.rs index 8ae40e71be..6346c1e588 100644 --- a/src/librustc_borrowck/borrowck/mir/gather_moves.rs +++ b/src/librustc_borrowck/borrowck/mir/gather_moves.rs @@ -9,16 +9,17 @@ // except according to those terms. -use rustc::ty::TyCtxt; +use rustc::ty::{self, TyCtxt, ParameterEnvironment}; use rustc::mir::repr::*; use rustc::util::nodemap::FnvHashMap; -use rustc_data_structures::indexed_vec::{Idx, IndexVec}; +use rustc_data_structures::indexed_vec::{IndexVec}; + +use syntax::codemap::DUMMY_SP; -use std::cell::{Cell}; use std::collections::hash_map::Entry; use std::fmt; -use std::iter; -use std::ops::Index; +use std::mem; +use std::ops::{Index, IndexMut}; use super::abs_domain::{AbstractElem, Lift}; @@ -28,17 +29,15 @@ use super::abs_domain::{AbstractElem, Lift}; // ensure that other code does not accidentally access `index.0` // (which is likely to yield a subtle off-by-one error). mod indexes { + use std::fmt; use core::nonzero::NonZero; use rustc_data_structures::indexed_vec::Idx; macro_rules! new_index { - ($Index:ident) => { - #[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] + ($Index:ident, $debug_name:expr) => { + #[derive(Copy, Clone, PartialEq, Eq, Hash)] pub struct $Index(NonZero); - impl $Index { - } - impl Idx for $Index { fn new(idx: usize) -> Self { unsafe { $Index(NonZero::new(idx + 1)) } @@ -47,14 +46,20 @@ mod indexes { *self.0 - 1 } } + + impl fmt::Debug for $Index { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "{}{}", $debug_name, self.index()) + } + } } } /// Index into MovePathData.move_paths - new_index!(MovePathIndex); + new_index!(MovePathIndex, "mp"); /// Index into MoveData.moves. - new_index!(MoveOutIndex); + new_index!(MoveOutIndex, "mo"); } pub use self::indexes::MovePathIndex; @@ -62,7 +67,7 @@ pub use self::indexes::MoveOutIndex; impl self::indexes::MoveOutIndex { pub fn move_path_index(&self, move_data: &MoveData) -> MovePathIndex { - move_data.moves[self.index()].path + move_data.moves[*self].path } } @@ -83,40 +88,7 @@ pub struct MovePath<'tcx> { pub next_sibling: Option, pub first_child: Option, pub parent: Option, - pub content: MovePathContent<'tcx>, -} - -/// MovePaths usually represent a single l-value. The exceptions are -/// forms that arise due to erroneous input code: static data holds -/// l-values that we cannot actually move out of. Therefore we map -/// statics to a special marker value (`MovePathContent::Static`) -/// representing an invalid origin. -#[derive(Clone, Debug)] -pub enum MovePathContent<'tcx> { - Lvalue(Lvalue<'tcx>), - Static, -} - -/// During construction of the MovePath's, we use PreMovePath to -/// represent accumulated state while we are gathering up all the -/// children of each path. 
-#[derive(Clone)] -struct PreMovePath<'tcx> { - pub next_sibling: Option, - pub first_child: Cell>, - pub parent: Option, - pub content: MovePathContent<'tcx>, -} - -impl<'tcx> PreMovePath<'tcx> { - fn into_move_path(self) -> MovePath<'tcx> { - MovePath { - next_sibling: self.next_sibling, - parent: self.parent, - content: self.content, - first_child: self.first_child.get(), - } - } + pub lvalue: Lvalue<'tcx>, } impl<'tcx> fmt::Debug for MovePath<'tcx> { @@ -131,52 +103,50 @@ impl<'tcx> fmt::Debug for MovePath<'tcx> { if let Some(next_sibling) = self.next_sibling { write!(w, " next_sibling: {:?}", next_sibling)?; } - write!(w, " content: {:?} }}", self.content) + write!(w, " lvalue: {:?} }}", self.lvalue) } } #[derive(Debug)] pub struct MoveData<'tcx> { - pub move_paths: MovePathData<'tcx>, - pub moves: Vec, - pub loc_map: LocMap, - pub path_map: PathMap, + pub move_paths: IndexVec>, + pub moves: IndexVec, + /// Each Location `l` is mapped to the MoveOut's that are effects + /// of executing the code at `l`. (There can be multiple MoveOut's + /// for a given `l` because each MoveOut is associated with one + /// particular path being moved.) + pub loc_map: LocationMap>, + pub path_map: IndexVec>, pub rev_lookup: MovePathLookup<'tcx>, } #[derive(Debug)] -pub struct LocMap { +pub struct LocationMap { /// Location-indexed (BasicBlock for outer index, index within BB - /// for inner index) map to list of MoveOutIndex's. - /// - /// Each Location `l` is mapped to the MoveOut's that are effects - /// of executing the code at `l`. (There can be multiple MoveOut's - /// for a given `l` because each MoveOut is associated with one - /// particular path being moved.) - map: Vec>>, + /// for inner index) map. + map: IndexVec>, } -impl Index for LocMap { - type Output = [MoveOutIndex]; +impl Index for LocationMap { + type Output = T; fn index(&self, index: Location) -> &Self::Output { - assert!(index.block.index() < self.map.len()); - assert!(index.index < self.map[index.block.index()].len()); - &self.map[index.block.index()][index.index] + &self.map[index.block][index.statement_index] } } -#[derive(Debug)] -pub struct PathMap { - /// Path-indexed map to list of MoveOutIndex's. - /// - /// Each Path `p` is mapped to the MoveOut's that move out of `p`. - map: Vec>, +impl IndexMut for LocationMap { + fn index_mut(&mut self, index: Location) -> &mut Self::Output { + &mut self.map[index.block][index.statement_index] + } } -impl Index for PathMap { - type Output = [MoveOutIndex]; - fn index(&self, index: MovePathIndex) -> &Self::Output { - &self.map[index.index()] +impl LocationMap where T: Default + Clone { + fn new(mir: &Mir) -> Self { + LocationMap { + map: mir.basic_blocks().iter().map(|block| { + vec![T::default(); block.statements.len()+1] + }).collect() + } } } @@ -196,598 +166,389 @@ pub struct MoveOut { impl fmt::Debug for MoveOut { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "p{}@{:?}", self.path.index(), self.source) + write!(fmt, "{:?}@{:?}", self.path, self.source) } } -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] -pub struct Location { - /// block where action is located - pub block: BasicBlock, - /// index within above block; statement when < statments.len) or - /// the terminator (when = statements.len). 
- pub index: usize, -} - -impl fmt::Debug for Location { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{:?}[{}]", self.block, self.index) - } -} - -#[derive(Debug)] -pub struct MovePathData<'tcx> { - move_paths: Vec>, -} - -impl<'tcx> MovePathData<'tcx> { - pub fn len(&self) -> usize { self.move_paths.len() } -} - -impl<'tcx> Index for MovePathData<'tcx> { - type Output = MovePath<'tcx>; - fn index(&self, i: MovePathIndex) -> &MovePath<'tcx> { - &self.move_paths[i.index()] - } -} - -struct MovePathDataBuilder<'tcx> { - pre_move_paths: Vec>, - rev_lookup: MovePathLookup<'tcx>, -} - /// Tables mapping from an l-value to its MovePathIndex. #[derive(Debug)] pub struct MovePathLookup<'tcx> { - vars: IndexVec>, - temps: IndexVec>, - args: IndexVec>, + vars: IndexVec, + temps: IndexVec, + args: IndexVec, /// The move path representing the return value is constructed /// lazily when we first encounter it in the input MIR. return_ptr: Option, - /// A single move path (representing any static data referenced) - /// is constructed lazily when we first encounter statics in the - /// input MIR. - statics: Option, - /// projections are made from a base-lvalue and a projection /// elem. The base-lvalue will have a unique MovePathIndex; we use /// the latter as the index into the outer vector (narrowing /// subsequent search so that it is solely relative to that /// base-lvalue). For the remaining lookup, we map the projection /// elem to the associated MovePathIndex. - projections: Vec, MovePathIndex>>, - - /// Tracks the next index to allocate during construction of the - /// MovePathData. Unused after MovePathData is fully constructed. - next_index: MovePathIndex, -} - -trait FillTo { - type T; - fn fill_to_with(&mut self, idx: usize, x: Self::T); - fn fill_to(&mut self, idx: usize) where Self::T: Default { - self.fill_to_with(idx, Default::default()) - } -} -impl FillTo for Vec { - type T = T; - fn fill_to_with(&mut self, idx: usize, x: T) { - if idx >= self.len() { - let delta = idx + 1 - self.len(); - assert_eq!(idx + 1, self.len() + delta); - self.extend(iter::repeat(x).take(delta)) + projections: FnvHashMap<(MovePathIndex, AbstractElem<'tcx>), MovePathIndex> +} + +struct MoveDataBuilder<'a, 'tcx: 'a> { + mir: &'a Mir<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: &'a ParameterEnvironment<'tcx>, + data: MoveData<'tcx>, +} + +pub enum MovePathError { + IllegalMove, + UnionMove { path: MovePathIndex }, +} + +impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { + fn new(mir: &'a Mir<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: &'a ParameterEnvironment<'tcx>) + -> Self { + let mut move_paths = IndexVec::new(); + let mut path_map = IndexVec::new(); + + MoveDataBuilder { + mir: mir, + tcx: tcx, + param_env: param_env, + data: MoveData { + moves: IndexVec::new(), + loc_map: LocationMap::new(mir), + rev_lookup: MovePathLookup { + vars: mir.var_decls.indices().map(Lvalue::Var).map(|v| { + Self::new_move_path(&mut move_paths, &mut path_map, None, v) + }).collect(), + temps: mir.temp_decls.indices().map(Lvalue::Temp).map(|t| { + Self::new_move_path(&mut move_paths, &mut path_map, None, t) + }).collect(), + args: mir.arg_decls.indices().map(Lvalue::Arg).map(|a| { + Self::new_move_path(&mut move_paths, &mut path_map, None, a) + }).collect(), + return_ptr: None, + projections: FnvHashMap(), + }, + move_paths: move_paths, + path_map: path_map, + } } - debug_assert!(idx < self.len()); } -} - -#[derive(Clone, Debug)] -enum LookupKind { Generate, Reuse } -#[derive(Clone, Debug)] -struct 
Lookup(LookupKind, T); - -impl Lookup { - fn index(&self) -> usize { (self.1).index() } -} -impl<'tcx> MovePathLookup<'tcx> { - fn new(mir: &Mir) -> Self { - MovePathLookup { - vars: IndexVec::from_elem(None, &mir.var_decls), - temps: IndexVec::from_elem(None, &mir.temp_decls), - args: IndexVec::from_elem(None, &mir.arg_decls), - statics: None, - return_ptr: None, - projections: vec![], - next_index: MovePathIndex::new(0), + fn new_move_path(move_paths: &mut IndexVec>, + path_map: &mut IndexVec>, + parent: Option, + lvalue: Lvalue<'tcx>) + -> MovePathIndex + { + let move_path = move_paths.push(MovePath { + next_sibling: None, + first_child: None, + parent: parent, + lvalue: lvalue + }); + + if let Some(parent) = parent { + let next_sibling = + mem::replace(&mut move_paths[parent].first_child, Some(move_path)); + move_paths[move_path].next_sibling = next_sibling; } - } - fn next_index(next: &mut MovePathIndex) -> MovePathIndex { - let i = *next; - *next = MovePathIndex::new(i.index() + 1); - i + let path_map_ent = path_map.push(vec![]); + assert_eq!(path_map_ent, move_path); + move_path } - fn lookup_or_generate(vec: &mut IndexVec>, - idx: I, - next_index: &mut MovePathIndex) - -> Lookup { - let entry = &mut vec[idx]; - match *entry { - None => { - let i = Self::next_index(next_index); - *entry = Some(i); - Lookup(LookupKind::Generate, i) - } - Some(entry_idx) => { - Lookup(LookupKind::Reuse, entry_idx) + /// This creates a MovePath for a given lvalue, returning a `MovePathError` + /// if that lvalue can't be moved from. + /// + /// NOTE: lvalues behind references *do not* get a move path, which is + /// problematic for borrowck. + /// + /// Maybe we should have separate "borrowck" and "moveck" modes. + fn move_path_for(&mut self, lval: &Lvalue<'tcx>) + -> Result + { + debug!("lookup({:?})", lval); + match *lval { + Lvalue::Var(var) => Ok(self.data.rev_lookup.vars[var]), + Lvalue::Arg(arg) => Ok(self.data.rev_lookup.args[arg]), + Lvalue::Temp(temp) => Ok(self.data.rev_lookup.temps[temp]), + // error: can't move out of a static + Lvalue::Static(..) => Err(MovePathError::IllegalMove), + Lvalue::ReturnPointer => match self.data.rev_lookup.return_ptr { + Some(ptr) => Ok(ptr), + ref mut ptr @ None => { + let path = Self::new_move_path( + &mut self.data.move_paths, + &mut self.data.path_map, + None, + lval.clone()); + *ptr = Some(path); + Ok(path) + } + }, + Lvalue::Projection(ref proj) => { + self.move_path_for_projection(lval, proj) + } } } - fn lookup_var(&mut self, var_idx: Var) -> Lookup { - Self::lookup_or_generate(&mut self.vars, - var_idx, - &mut self.next_index) - } - - fn lookup_temp(&mut self, temp_idx: Temp) -> Lookup { - Self::lookup_or_generate(&mut self.temps, - temp_idx, - &mut self.next_index) - } - - fn lookup_arg(&mut self, arg_idx: Arg) -> Lookup { - Self::lookup_or_generate(&mut self.args, - arg_idx, - &mut self.next_index) - } - - fn lookup_static(&mut self) -> Lookup { - match self.statics { - Some(mpi) => { - Lookup(LookupKind::Reuse, mpi) - } - ref mut ret @ None => { - let mpi = Self::next_index(&mut self.next_index); - *ret = Some(mpi); - Lookup(LookupKind::Generate, mpi) + fn create_move_path(&mut self, lval: &Lvalue<'tcx>) { + // This is an assignment, not a move, so this not being a valid + // move path is OK.
+ let _ = self.move_path_for(lval); + } + + fn move_path_for_projection(&mut self, + lval: &Lvalue<'tcx>, + proj: &LvalueProjection<'tcx>) + -> Result + { + let base = try!(self.move_path_for(&proj.base)); + let lv_ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx); + match lv_ty.sty { + // error: can't move out of borrowed content + ty::TyRef(..) | ty::TyRawPtr(..) => return Err(MovePathError::IllegalMove), + // error: can't move out of struct with destructor + ty::TyAdt(adt, _) if adt.has_dtor() => + return Err(MovePathError::IllegalMove), + // move out of union - always move the entire union + ty::TyAdt(adt, _) if adt.is_union() => + return Err(MovePathError::UnionMove { path: base }), + // error: can't move out of a slice + ty::TySlice(..) => + return Err(MovePathError::IllegalMove), + ty::TyArray(..) => match proj.elem { + // error: can't move out of an array + ProjectionElem::Index(..) => return Err(MovePathError::IllegalMove), + _ => { + // FIXME: still badly broken + } + }, + _ => {} + }; + match self.data.rev_lookup.projections.entry((base, proj.elem.lift())) { + Entry::Occupied(ent) => Ok(*ent.get()), + Entry::Vacant(ent) => { + let path = Self::new_move_path( + &mut self.data.move_paths, + &mut self.data.path_map, + Some(base), + lval.clone() + ); + ent.insert(path); + Ok(path) } } } - fn lookup_return_pointer(&mut self) -> Lookup { - match self.return_ptr { - Some(mpi) => { - Lookup(LookupKind::Reuse, mpi) + fn finalize(self) -> MoveData<'tcx> { + debug!("{}", { + debug!("moves for {:?}:", self.mir.span); + for (j, mo) in self.data.moves.iter_enumerated() { + debug!(" {:?} = {:?}", j, mo); } - ref mut ret @ None => { - let mpi = Self::next_index(&mut self.next_index); - *ret = Some(mpi); - Lookup(LookupKind::Generate, mpi) + debug!("move paths for {:?}:", self.mir.span); + for (j, path) in self.data.move_paths.iter_enumerated() { + debug!(" {:?} = {:?}", j, path); } - } + "done dumping moves" + }); + self.data } +} - fn lookup_proj(&mut self, - proj: &LvalueProjection<'tcx>, - base: MovePathIndex) -> Lookup { - let MovePathLookup { ref mut projections, - ref mut next_index, .. } = *self; - projections.fill_to(base.index()); - match projections[base.index()].entry(proj.elem.lift()) { - Entry::Occupied(ent) => { - Lookup(LookupKind::Reuse, *ent.get()) - } - Entry::Vacant(ent) => { - let mpi = Self::next_index(next_index); - ent.insert(mpi); - Lookup(LookupKind::Generate, mpi) - } - } - } +#[derive(Copy, Clone, Debug)] +pub enum LookupResult { + Exact(MovePathIndex), + Parent(Option) } impl<'tcx> MovePathLookup<'tcx> { // Unlike the builder `fn move_path_for` below, this lookup // alternative will *not* create a MovePath on the fly for an - // unknown l-value; it will simply panic. - pub fn find(&self, lval: &Lvalue<'tcx>) -> MovePathIndex { + // unknown l-value, but will rather return the nearest available + // parent. + pub fn find(&self, lval: &Lvalue<'tcx>) -> LookupResult { match *lval { - Lvalue::Var(var) => self.vars[var].unwrap(), - Lvalue::Temp(temp) => self.temps[temp].unwrap(), - Lvalue::Arg(arg) => self.args[arg].unwrap(), - Lvalue::Static(ref _def_id) => self.statics.unwrap(), - Lvalue::ReturnPointer => self.return_ptr.unwrap(), + Lvalue::Var(var) => LookupResult::Exact(self.vars[var]), + Lvalue::Temp(temp) => LookupResult::Exact(self.temps[temp]), + Lvalue::Arg(arg) => LookupResult::Exact(self.args[arg]), + Lvalue::Static(..) 
=> LookupResult::Parent(None), + Lvalue::ReturnPointer => LookupResult::Exact(self.return_ptr.unwrap()), Lvalue::Projection(ref proj) => { - let base_index = self.find(&proj.base); - self.projections[base_index.index()][&proj.elem.lift()] + match self.find(&proj.base) { + LookupResult::Exact(base_path) => { + match self.projections.get(&(base_path, proj.elem.lift())) { + Some(&subpath) => LookupResult::Exact(subpath), + None => LookupResult::Parent(Some(base_path)) + } + } + inexact => inexact + } } } } } -impl<'tcx> MovePathDataBuilder<'tcx> { - fn lookup(&mut self, lval: &Lvalue<'tcx>) -> Lookup { - let proj = match *lval { - Lvalue::Var(var_idx) => - return self.rev_lookup.lookup_var(var_idx), - Lvalue::Temp(temp_idx) => - return self.rev_lookup.lookup_temp(temp_idx), - Lvalue::Arg(arg_idx) => - return self.rev_lookup.lookup_arg(arg_idx), - Lvalue::Static(_def_id) => - return self.rev_lookup.lookup_static(), - Lvalue::ReturnPointer => - return self.rev_lookup.lookup_return_pointer(), - Lvalue::Projection(ref proj) => { - proj - } - }; - - let base_index = self.move_path_for(&proj.base); - self.rev_lookup.lookup_proj(proj, base_index) - } - - fn create_move_path(&mut self, lval: &Lvalue<'tcx>) { - // Create MovePath for `lval`, discarding returned index. - self.move_path_for(lval); +impl<'a, 'tcx> MoveData<'tcx> { + pub fn gather_moves(mir: &Mir<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: &ParameterEnvironment<'tcx>) + -> Self { + gather_moves(mir, tcx, param_env) } +} - fn move_path_for(&mut self, lval: &Lvalue<'tcx>) -> MovePathIndex { - debug!("move_path_for({:?})", lval); - - let lookup: Lookup = self.lookup(lval); - - // `lookup` is either the previously assigned index or a - // newly-allocated one. - debug_assert!(lookup.index() <= self.pre_move_paths.len()); - - if let Lookup(LookupKind::Generate, mpi) = lookup { - let parent; - let sibling; - // tracks whether content is Some non-static; statics map to None. - let content: Option<&Lvalue<'tcx>>; - - match *lval { - Lvalue::Static(_) => { - content = None; - sibling = None; - parent = None; - } - - Lvalue::Var(_) | Lvalue::Temp(_) | Lvalue::Arg(_) | - Lvalue::ReturnPointer => { - content = Some(lval); - sibling = None; - parent = None; - } - Lvalue::Projection(ref proj) => { - content = Some(lval); - - // Here, install new MovePath as new first_child. - - // Note: `parent` previously allocated (Projection - // case of match above established this). - let idx = self.move_path_for(&proj.base); - parent = Some(idx); - - let parent_move_path = &mut self.pre_move_paths[idx.index()]; - - // At last: Swap in the new first_child. 
- sibling = parent_move_path.first_child.get(); - parent_move_path.first_child.set(Some(mpi)); - } - }; - - let content = match content { - Some(lval) => MovePathContent::Lvalue(lval.clone()), - None => MovePathContent::Static, - }; - - let move_path = PreMovePath { - next_sibling: sibling, - parent: parent, - content: content, - first_child: Cell::new(None), - }; +fn gather_moves<'a, 'tcx>(mir: &Mir<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: &ParameterEnvironment<'tcx>) + -> MoveData<'tcx> { + let mut builder = MoveDataBuilder::new(mir, tcx, param_env); - self.pre_move_paths.push(move_path); + for (bb, block) in mir.basic_blocks().iter_enumerated() { + for (i, stmt) in block.statements.iter().enumerate() { + let source = Location { block: bb, statement_index: i }; + builder.gather_statement(source, stmt); } - return lookup.1; - } -} - -impl<'a, 'tcx> MoveData<'tcx> { - pub fn gather_moves(mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self { - gather_moves(mir, tcx) + let terminator_loc = Location { + block: bb, + statement_index: block.statements.len() + }; + builder.gather_terminator(terminator_loc, block.terminator()); } -} -#[derive(Debug)] -enum StmtKind { - Use, Repeat, Cast, BinaryOp, UnaryOp, Box, - Aggregate, Drop, CallFn, CallArg, Return, If, + builder.finalize() } -fn gather_moves<'a, 'tcx>(mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> MoveData<'tcx> { - use self::StmtKind as SK; - - let bb_count = mir.basic_blocks().len(); - let mut moves = vec![]; - let mut loc_map: Vec<_> = iter::repeat(Vec::new()).take(bb_count).collect(); - let mut path_map = Vec::new(); - - // this is mutable only because we will move it to and fro' the - // BlockContexts constructed on each iteration. (Moving is more - // straight-forward than mutable borrows in this instance.) - let mut builder = MovePathDataBuilder { - pre_move_paths: Vec::new(), - rev_lookup: MovePathLookup::new(mir), - }; - - // Before we analyze the program text, we create the MovePath's - // for all of the vars, args, and temps. (This enforces a basic - // property that even if the MIR body doesn't contain any - // references to a var/arg/temp, it will still be a valid - // operation to lookup the MovePath associated with it.) - assert!(mir.var_decls.len() <= ::std::u32::MAX as usize); - assert!(mir.arg_decls.len() <= ::std::u32::MAX as usize); - assert!(mir.temp_decls.len() <= ::std::u32::MAX as usize); - for var in mir.var_decls.indices() { - let path_idx = builder.move_path_for(&Lvalue::Var(var)); - path_map.fill_to(path_idx.index()); - } - for arg in mir.arg_decls.indices() { - let path_idx = builder.move_path_for(&Lvalue::Arg(arg)); - path_map.fill_to(path_idx.index()); - } - for temp in mir.temp_decls.indices() { - let path_idx = builder.move_path_for(&Lvalue::Temp(temp)); - path_map.fill_to(path_idx.index()); +impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { + fn gather_statement(&mut self, loc: Location, stmt: &Statement<'tcx>) { + debug!("gather_statement({:?}, {:?})", loc, stmt); + match stmt.kind { + StatementKind::Assign(ref lval, ref rval) => { + self.create_move_path(lval); + self.gather_rvalue(loc, rval); + } + StatementKind::StorageLive(_) | + StatementKind::StorageDead(_) => {} + StatementKind::SetDiscriminant{ .. 
} => { + span_bug!(stmt.source_info.span, + "SetDiscriminant should not exist during borrowck"); + } + StatementKind::Nop => {} + } } - for (bb, bb_data) in mir.basic_blocks().iter_enumerated() { - let loc_map_bb = &mut loc_map[bb.index()]; - - debug_assert!(loc_map_bb.len() == 0); - let len = bb_data.statements.len(); - loc_map_bb.fill_to(len); - debug_assert!(loc_map_bb.len() == len + 1); - - let mut bb_ctxt = BlockContext { - _tcx: tcx, - moves: &mut moves, - builder: builder, - path_map: &mut path_map, - loc_map_bb: loc_map_bb, - }; - - for (i, stmt) in bb_data.statements.iter().enumerate() { - let source = Location { block: bb, index: i }; - match stmt.kind { - StatementKind::Assign(ref lval, ref rval) => { - bb_ctxt.builder.create_move_path(lval); - - // Ensure that the path_map contains entries even - // if the lvalue is assigned and never read. - let assigned_path = bb_ctxt.builder.move_path_for(lval); - bb_ctxt.path_map.fill_to(assigned_path.index()); - - match *rval { - Rvalue::Use(ref operand) => { - bb_ctxt.on_operand(SK::Use, operand, source) - } - Rvalue::Repeat(ref operand, ref _const) => - bb_ctxt.on_operand(SK::Repeat, operand, source), - Rvalue::Cast(ref _kind, ref operand, ref _ty) => - bb_ctxt.on_operand(SK::Cast, operand, source), - Rvalue::BinaryOp(ref _binop, ref operand1, ref operand2) | - Rvalue::CheckedBinaryOp(ref _binop, ref operand1, ref operand2) => { - bb_ctxt.on_operand(SK::BinaryOp, operand1, source); - bb_ctxt.on_operand(SK::BinaryOp, operand2, source); - } - Rvalue::UnaryOp(ref _unop, ref operand) => { - bb_ctxt.on_operand(SK::UnaryOp, operand, source); - } - Rvalue::Box(ref _ty) => { - // this is creating uninitialized - // memory that needs to be initialized. - let deref_lval = Lvalue::Projection(Box::new(Projection { - base: lval.clone(), - elem: ProjectionElem::Deref, - })); - bb_ctxt.on_move_out_lval(SK::Box, &deref_lval, source); - } - Rvalue::Aggregate(ref _kind, ref operands) => { - for operand in operands { - bb_ctxt.on_operand(SK::Aggregate, operand, source); - } - } - Rvalue::Ref(..) | - Rvalue::Len(..) | - Rvalue::InlineAsm { .. } => {} - } - } - StatementKind::StorageLive(_) | - StatementKind::StorageDead(_) => {} - StatementKind::SetDiscriminant{ .. } => { - span_bug!(stmt.source_info.span, - "SetDiscriminant should not exist during borrowck"); + fn gather_rvalue(&mut self, loc: Location, rvalue: &Rvalue<'tcx>) { + match *rvalue { + Rvalue::Use(ref operand) | + Rvalue::Repeat(ref operand, _) | + Rvalue::Cast(_, ref operand, _) | + Rvalue::UnaryOp(_, ref operand) => { + self.gather_operand(loc, operand) + } + Rvalue::BinaryOp(ref _binop, ref lhs, ref rhs) | + Rvalue::CheckedBinaryOp(ref _binop, ref lhs, ref rhs) => { + self.gather_operand(loc, lhs); + self.gather_operand(loc, rhs); + } + Rvalue::Aggregate(ref _kind, ref operands) => { + for operand in operands { + self.gather_operand(loc, operand); } } + Rvalue::Ref(..) | + Rvalue::Len(..) | + Rvalue::InlineAsm { .. } => {} + Rvalue::Box(..) => { + // This returns an rvalue with uninitialized contents. We can't + // move out of it here because it is an rvalue - assignments always + // completely initialize their lvalue. + // + // However, this does not matter - MIR building is careful to + // only emit a shallow free for the partially-initialized + // temporary. + // + // In any case, if we want to fix this, we have to register a + // special move and change the `statement_effect` functions. 
+ } } + } - debug!("gather_moves({:?})", bb_data.terminator()); - match bb_data.terminator().kind { + fn gather_terminator(&mut self, loc: Location, term: &Terminator<'tcx>) { + debug!("gather_terminator({:?}, {:?})", loc, term); + match term.kind { TerminatorKind::Goto { target: _ } | TerminatorKind::Resume | TerminatorKind::Unreachable => { } TerminatorKind::Return => { - let source = Location { block: bb, - index: bb_data.statements.len() }; - debug!("gather_moves Return on_move_out_lval return {:?}", source); - bb_ctxt.on_move_out_lval(SK::Return, &Lvalue::ReturnPointer, source); + self.gather_move(loc, &Lvalue::ReturnPointer); } - TerminatorKind::If { ref cond, targets: _ } => { - let source = Location { block: bb, - index: bb_data.statements.len() }; - bb_ctxt.on_operand(SK::If, cond, source); - } - - TerminatorKind::Assert { - ref cond, expected: _, - ref msg, target: _, cleanup: _ - } => { - // The `cond` is always of (copyable) type `bool`, - // so there will never be anything to move. - let _ = cond; - match *msg { - AssertMessage:: BoundsCheck { ref len, ref index } => { - // Same for the usize length and index in bounds-checking. - let _ = (len, index); - } - AssertMessage::Math(_) => {} - } - } - - TerminatorKind::SwitchInt { switch_ty: _, values: _, targets: _, ref discr } | - TerminatorKind::Switch { adt_def: _, targets: _, ref discr } => { - // The `discr` is not consumed; that is instead - // encoded on specific match arms (and for - // SwitchInt`, it is always a copyable integer - // type anyway). - let _ = discr; + TerminatorKind::If { .. } | + TerminatorKind::Assert { .. } | + TerminatorKind::SwitchInt { .. } | + TerminatorKind::Switch { .. } => { + // branching terminators - these don't move anything } TerminatorKind::Drop { ref location, target: _, unwind: _ } => { - let source = Location { block: bb, - index: bb_data.statements.len() }; - bb_ctxt.on_move_out_lval(SK::Drop, location, source); + self.gather_move(loc, location); } TerminatorKind::DropAndReplace { ref location, ref value, .. } => { - let assigned_path = bb_ctxt.builder.move_path_for(location); - bb_ctxt.path_map.fill_to(assigned_path.index()); - - let source = Location { block: bb, - index: bb_data.statements.len() }; - bb_ctxt.on_operand(SK::Use, value, source); + self.create_move_path(location); + self.gather_operand(loc, value); } TerminatorKind::Call { ref func, ref args, ref destination, cleanup: _ } => { - let source = Location { block: bb, - index: bb_data.statements.len() }; - bb_ctxt.on_operand(SK::CallFn, func, source); + self.gather_operand(loc, func); for arg in args { - debug!("gather_moves Call on_operand {:?} {:?}", arg, source); - bb_ctxt.on_operand(SK::CallArg, arg, source); + self.gather_operand(loc, arg); } if let Some((ref destination, _bb)) = *destination { - debug!("gather_moves Call create_move_path {:?} {:?}", destination, source); - - // Ensure that the path_map contains entries even - // if the lvalue is assigned and never read. - let assigned_path = bb_ctxt.builder.move_path_for(destination); - bb_ctxt.path_map.fill_to(assigned_path.index()); - - bb_ctxt.builder.create_move_path(destination); + self.create_move_path(destination); } } } - - builder = bb_ctxt.builder; } - // At this point, we may have created some MovePaths that do not - // have corresponding entries in the path map. - // - // (For example, creating the path `a.b.c` may, as a side-effect, - // create a path for the parent path `a.b`.) - // - // All such paths were not referenced ... 
- // - // well you know, lets actually try just asserting that the path map *is* complete. - assert_eq!(path_map.len(), builder.pre_move_paths.len()); - - let pre_move_paths = builder.pre_move_paths; - let move_paths: Vec<_> = pre_move_paths.into_iter() - .map(|p| p.into_move_path()) - .collect(); - - debug!("{}", { - let mut seen: Vec<_> = move_paths.iter().map(|_| false).collect(); - for (j, &MoveOut { ref path, ref source }) in moves.iter().enumerate() { - debug!("MovePathData moves[{}]: MoveOut {{ path: {:?} = {:?}, source: {:?} }}", - j, path, move_paths[path.index()], source); - seen[path.index()] = true; - } - for (j, path) in move_paths.iter().enumerate() { - if !seen[j] { - debug!("MovePathData move_paths[{}]: {:?}", j, path); + fn gather_operand(&mut self, loc: Location, operand: &Operand<'tcx>) { + match *operand { + Operand::Constant(..) => {} // not-a-move + Operand::Consume(ref lval) => { // a move + self.gather_move(loc, lval); } } - "done dumping MovePathData" - }); - - MoveData { - move_paths: MovePathData { move_paths: move_paths, }, - moves: moves, - loc_map: LocMap { map: loc_map }, - path_map: PathMap { map: path_map }, - rev_lookup: builder.rev_lookup, } -} -struct BlockContext<'b, 'tcx: 'b> { - _tcx: TyCtxt<'b, 'tcx, 'tcx>, - moves: &'b mut Vec, - builder: MovePathDataBuilder<'tcx>, - path_map: &'b mut Vec>, - loc_map_bb: &'b mut Vec>, -} + fn gather_move(&mut self, loc: Location, lval: &Lvalue<'tcx>) { + debug!("gather_move({:?}, {:?})", loc, lval); -impl<'b, 'tcx: 'b> BlockContext<'b, 'tcx> { - fn on_move_out_lval(&mut self, - stmt_kind: StmtKind, - lval: &Lvalue<'tcx>, - source: Location) { - let i = source.index; - let index = MoveOutIndex::new(self.moves.len()); - - let path = self.builder.move_path_for(lval); - self.moves.push(MoveOut { path: path, source: source.clone() }); - self.path_map.fill_to(path.index()); - - debug!("ctxt: {:?} add consume of lval: {:?} \ - at index: {:?} \ - to path_map for path: {:?} and \ - to loc_map for loc: {:?}", - stmt_kind, lval, index, path, source); - - debug_assert!(path.index() < self.path_map.len()); - // this is actually a questionable assert; at the very - // least, incorrect input code can probably cause it to - // fire. - assert!(self.path_map[path.index()].iter().find(|idx| **idx == index).is_none()); - self.path_map[path.index()].push(index); - - debug_assert!(i < self.loc_map_bb.len()); - debug_assert!(self.loc_map_bb[i].iter().find(|idx| **idx == index).is_none()); - self.loc_map_bb[i].push(index); - } + let lv_ty = lval.ty(self.mir, self.tcx).to_ty(self.tcx); + if !lv_ty.moves_by_default(self.tcx, self.param_env, DUMMY_SP) { + debug!("gather_move({:?}, {:?}) - {:?} is Copy. skipping", loc, lval, lv_ty); + return + } - fn on_operand(&mut self, stmt_kind: StmtKind, operand: &Operand<'tcx>, source: Location) { - match *operand { - Operand::Constant(..) => {} // not-a-move - Operand::Consume(ref lval) => { // a move - self.on_move_out_lval(stmt_kind, lval, source); + let path = match self.move_path_for(lval) { + Ok(path) | Err(MovePathError::UnionMove { path }) => path, + Err(MovePathError::IllegalMove) => { + // Moving out of a bad path. Eventually, this should be a MIR + // borrowck error instead of a bug. 
+ span_bug!(self.mir.span, + "Broken MIR: moving out of lvalue {:?}: {:?} at {:?}", + lval, lv_ty, loc); } - } + }; + let move_out = self.data.moves.push(MoveOut { path: path, source: loc }); + + debug!("gather_move({:?}, {:?}): adding move {:?} of {:?}", + loc, lval, move_out, path); + + self.data.path_map[path].push(move_out); + self.data.loc_map[loc].push(move_out); } } diff --git a/src/librustc_borrowck/borrowck/mir/mod.rs b/src/librustc_borrowck/borrowck/mir/mod.rs index dbee0ea9b0..f26afdc2b8 100644 --- a/src/librustc_borrowck/borrowck/mir/mod.rs +++ b/src/librustc_borrowck/borrowck/mir/mod.rs @@ -11,7 +11,6 @@ use borrowck::BorrowckCtxt; use syntax::ast::{self, MetaItem}; -use syntax::attr::AttrMetaMethods; use syntax::ptr::P; use syntax_pos::{Span, DUMMY_SP}; @@ -19,7 +18,7 @@ use rustc::hir; use rustc::hir::intravisit::{FnKind}; use rustc::mir::repr; -use rustc::mir::repr::{BasicBlock, BasicBlockData, Mir, Statement, Terminator}; +use rustc::mir::repr::{BasicBlock, BasicBlockData, Mir, Statement, Terminator, Location}; use rustc::session::Session; use rustc::ty::{self, TyCtxt}; @@ -35,16 +34,16 @@ use self::dataflow::{DataflowOperator}; use self::dataflow::{Dataflow, DataflowAnalysis, DataflowResults}; use self::dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals}; use self::dataflow::{DefinitelyInitializedLvals}; -use self::gather_moves::{MoveData, MovePathIndex, Location}; -use self::gather_moves::{MovePathContent, MovePathData}; +use self::gather_moves::{MoveData, MovePathIndex, LookupResult}; fn has_rustc_mir_with(attrs: &[ast::Attribute], name: &str) -> Option> { for attr in attrs { if attr.check_name("rustc_mir") { let items = attr.meta_item_list(); for item in items.iter().flat_map(|l| l.iter()) { - if item.check_name(name) { - return Some(item.clone()) + match item.meta_item() { + Some(mi) if mi.check_name(name) => return Some(mi.clone()), + _ => continue } } } @@ -67,8 +66,8 @@ pub fn borrowck_mir<'a, 'tcx: 'a>( id: ast::NodeId, attributes: &[ast::Attribute]) { match fk { - FnKind::ItemFn(name, _, _, _, _, _, _) | - FnKind::Method(name, _, _, _) => { + FnKind::ItemFn(name, ..) | + FnKind::Method(name, ..) 
=> { debug!("borrowck_mir({}) UNIMPLEMENTED", name); } FnKind::Closure(_) => { @@ -78,8 +77,8 @@ pub fn borrowck_mir<'a, 'tcx: 'a>( let tcx = bcx.tcx; - let move_data = MoveData::gather_moves(mir, tcx); let param_env = ty::ParameterEnvironment::for_item(tcx, id); + let move_data = MoveData::gather_moves(mir, tcx, ¶m_env); let mdpe = MoveDataParamEnv { move_data: move_data, param_env: param_env }; let flow_inits = do_dataflow(tcx, mir, id, attributes, &mdpe, MaybeInitializedLvals::new(tcx, mir)); @@ -126,8 +125,6 @@ fn do_dataflow<'a, 'tcx, BD>(tcx: TyCtxt<'a, 'tcx, 'tcx>, bd: BD) -> DataflowResults where BD: BitDenotation> + DataflowOperator { - use syntax::attr::AttrMetaMethods; - let name_found = |sess: &Session, attrs: &[ast::Attribute], name| -> Option { if let Some(item) = has_rustc_mir_with(attrs, name) { if let Some(s) = item.value_str() { @@ -213,23 +210,23 @@ impl DropFlagState { } } -fn move_path_children_matching<'tcx, F>(move_paths: &MovePathData<'tcx>, +fn move_path_children_matching<'tcx, F>(move_data: &MoveData<'tcx>, path: MovePathIndex, mut cond: F) -> Option where F: FnMut(&repr::LvalueProjection<'tcx>) -> bool { - let mut next_child = move_paths[path].first_child; + let mut next_child = move_data.move_paths[path].first_child; while let Some(child_index) = next_child { - match move_paths[child_index].content { - MovePathContent::Lvalue(repr::Lvalue::Projection(ref proj)) => { + match move_data.move_paths[child_index].lvalue { + repr::Lvalue::Projection(ref proj) => { if cond(proj) { return Some(child_index) } } _ => {} } - next_child = move_paths[child_index].next_sibling; + next_child = move_data.move_paths[child_index].next_sibling; } None @@ -259,12 +256,12 @@ fn lvalue_contents_drop_state_cannot_differ<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx let ty = lv.ty(mir, tcx).to_ty(tcx); match ty.sty { ty::TyArray(..) | ty::TySlice(..) | ty::TyRef(..) | ty::TyRawPtr(..) => { - debug!("lvalue_contents_drop_state_cannot_differ lv: {:?} ty: {:?} refd => false", + debug!("lvalue_contents_drop_state_cannot_differ lv: {:?} ty: {:?} refd => true", lv, ty); true } - ty::TyStruct(def, _) | ty::TyEnum(def, _) if def.has_dtor() => { - debug!("lvalue_contents_drop_state_cannot_differ lv: {:?} ty: {:?} Drop => false", + ty::TyAdt(def, _) if def.has_dtor() || def.is_union() => { + debug!("lvalue_contents_drop_state_cannot_differ lv: {:?} ty: {:?} Drop => true", lv, ty); true } @@ -274,6 +271,24 @@ fn lvalue_contents_drop_state_cannot_differ<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx } } +fn on_lookup_result_bits<'a, 'tcx, F>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &Mir<'tcx>, + move_data: &MoveData<'tcx>, + lookup_result: LookupResult, + each_child: F) + where F: FnMut(MovePathIndex) +{ + match lookup_result { + LookupResult::Parent(..) 
=> { + // access to untracked value - do not touch children + } + LookupResult::Exact(e) => { + on_all_children_bits(tcx, mir, move_data, e, each_child) + } + } +} + fn on_all_children_bits<'a, 'tcx, F>( tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &Mir<'tcx>, @@ -288,12 +303,8 @@ fn on_all_children_bits<'a, 'tcx, F>( move_data: &MoveData<'tcx>, path: MovePathIndex) -> bool { - match move_data.move_paths[path].content { - MovePathContent::Lvalue(ref lvalue) => { - lvalue_contents_drop_state_cannot_differ(tcx, mir, lvalue) - } - _ => true - } + lvalue_contents_drop_state_cannot_differ( + tcx, mir, &move_data.move_paths[path].lvalue) } fn on_all_children_bits<'a, 'tcx, F>( @@ -329,10 +340,10 @@ fn drop_flag_effects_for_function_entry<'a, 'tcx, F>( let move_data = &ctxt.move_data; for (arg, _) in mir.arg_decls.iter_enumerated() { let lvalue = repr::Lvalue::Arg(arg); - let move_path_index = move_data.rev_lookup.find(&lvalue); - on_all_children_bits(tcx, mir, move_data, - move_path_index, - |moi| callback(moi, DropFlagState::Present)); + let lookup_result = move_data.rev_lookup.find(&lvalue); + on_lookup_result_bits(tcx, mir, move_data, + lookup_result, + |moi| callback(moi, DropFlagState::Present)); } } @@ -354,11 +365,10 @@ fn drop_flag_effects_for_location<'a, 'tcx, F>( debug!("moving out of path {:?}", move_data.move_paths[path]); // don't move out of non-Copy things - if let MovePathContent::Lvalue(ref lvalue) = move_data.move_paths[path].content { - let ty = lvalue.ty(mir, tcx).to_ty(tcx); - if !ty.moves_by_default(tcx, param_env, DUMMY_SP) { - continue; - } + let lvalue = &move_data.move_paths[path].lvalue; + let ty = lvalue.ty(mir, tcx).to_ty(tcx); + if !ty.moves_by_default(tcx, param_env, DUMMY_SP) { + continue; } on_all_children_bits(tcx, mir, move_data, @@ -367,27 +377,28 @@ fn drop_flag_effects_for_location<'a, 'tcx, F>( } let block = &mir[loc.block]; - match block.statements.get(loc.index) { + match block.statements.get(loc.statement_index) { Some(stmt) => match stmt.kind { repr::StatementKind::SetDiscriminant{ .. } => { span_bug!(stmt.source_info.span, "SetDiscrimant should not exist during borrowck"); } repr::StatementKind::Assign(ref lvalue, _) => { debug!("drop_flag_effects: assignment {:?}", stmt); - on_all_children_bits(tcx, mir, move_data, - move_data.rev_lookup.find(lvalue), - |moi| callback(moi, DropFlagState::Present)) + on_lookup_result_bits(tcx, mir, move_data, + move_data.rev_lookup.find(lvalue), + |moi| callback(moi, DropFlagState::Present)) } repr::StatementKind::StorageLive(_) | - repr::StatementKind::StorageDead(_) => {} + repr::StatementKind::StorageDead(_) | + repr::StatementKind::Nop => {} }, None => { debug!("drop_flag_effects: replace {:?}", block.terminator()); match block.terminator().kind { repr::TerminatorKind::DropAndReplace { ref location, .. } => { - on_all_children_bits(tcx, mir, move_data, - move_data.rev_lookup.find(location), - |moi| callback(moi, DropFlagState::Present)) + on_lookup_result_bits(tcx, mir, move_data, + move_data.rev_lookup.find(location), + |moi| callback(moi, DropFlagState::Present)) } _ => { // other terminators do not contain move-ins diff --git a/src/librustc_borrowck/borrowck/mir/patch.rs b/src/librustc_borrowck/borrowck/mir/patch.rs index 417e719a9d..52cd1a9f94 100644 --- a/src/librustc_borrowck/borrowck/mir/patch.rs +++ b/src/librustc_borrowck/borrowck/mir/patch.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::gather_moves::Location; use rustc::ty::Ty; use rustc::mir::repr::*; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; @@ -89,7 +88,7 @@ impl<'tcx> MirPatch<'tcx> { }; Location { block: bb, - index: offset + statement_index: offset } } @@ -149,12 +148,12 @@ impl<'tcx> MirPatch<'tcx> { } debug!("MirPatch: adding statement {:?} at loc {:?}+{}", stmt, loc, delta); - loc.index += delta; + loc.statement_index += delta; let source_info = Self::source_info_for_index( &mir[loc.block], loc ); mir[loc.block].statements.insert( - loc.index, Statement { + loc.statement_index, Statement { source_info: source_info, kind: stmt }); @@ -163,7 +162,7 @@ impl<'tcx> MirPatch<'tcx> { } pub fn source_info_for_index(data: &BasicBlockData, loc: Location) -> SourceInfo { - match data.statements.get(loc.index) { + match data.statements.get(loc.statement_index) { Some(stmt) => stmt.source_info, None => data.terminator().source_info } diff --git a/src/librustc_borrowck/borrowck/mod.rs b/src/librustc_borrowck/borrowck/mod.rs index e0cbd972bd..5d62629b64 100644 --- a/src/librustc_borrowck/borrowck/mod.rs +++ b/src/librustc_borrowck/borrowck/mod.rs @@ -41,8 +41,8 @@ use rustc::ty::{self, TyCtxt}; use std::fmt; use std::mem; use std::rc::Rc; +use std::hash::{Hash, Hasher}; use syntax::ast; -use syntax::attr::AttrMetaMethods; use syntax_pos::{MultiSpan, Span}; use errors::DiagnosticBuilder; @@ -142,7 +142,7 @@ fn borrowck_item(this: &mut BorrowckCtxt, item: &hir::Item) { // loan step is intended for things that have a data // flow dependent conditions. match item.node { - hir::ItemStatic(_, _, ref ex) | + hir::ItemStatic(.., ref ex) | hir::ItemConst(_, ref ex) => { gather_loans::gather_loans_in_static_initializer(this, item.id, &ex); } @@ -345,7 +345,7 @@ impl<'tcx> Loan<'tcx> { } } -#[derive(Eq, Hash)] +#[derive(Eq)] pub struct LoanPath<'tcx> { kind: LoanPathKind<'tcx>, ty: ty::Ty<'tcx>, @@ -353,10 +353,13 @@ pub struct LoanPath<'tcx> { impl<'tcx> PartialEq for LoanPath<'tcx> { fn eq(&self, that: &LoanPath<'tcx>) -> bool { - let r = self.kind == that.kind; - debug_assert!(self.ty == that.ty || !r, - "Somehow loan paths are equal though their tys are not."); - r + self.kind == that.kind + } +} + +impl<'tcx> Hash for LoanPath<'tcx> { + fn hash(&self, state: &mut H) { + self.kind.hash(state); } } @@ -365,7 +368,7 @@ pub enum LoanPathKind<'tcx> { LpVar(ast::NodeId), // `x` in README.md LpUpvar(ty::UpvarId), // `x` captured by-value into closure LpDowncast(Rc>, DefId), // `x` downcast to particular enum variant - LpExtend(Rc>, mc::MutabilityCategory, LoanPathElem) + LpExtend(Rc>, mc::MutabilityCategory, LoanPathElem<'tcx>) } impl<'tcx> LoanPath<'tcx> { @@ -410,8 +413,8 @@ impl ToInteriorKind for mc::InteriorKind { // `enum E { X { foo: u32 }, Y { foo: u32 }}` // each `foo` is qualified by the definitition id of the variant (`X` or `Y`). #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub enum LoanPathElem { - LpDeref(mc::PointerKind), +pub enum LoanPathElem<'tcx> { + LpDeref(mc::PointerKind<'tcx>), LpInterior(Option, InteriorKind), } @@ -419,7 +422,7 @@ pub fn closure_to_block(closure_id: ast::NodeId, tcx: TyCtxt) -> ast::NodeId { match tcx.map.get(closure_id) { hir_map::NodeExpr(expr) => match expr.node { - hir::ExprClosure(_, _, ref block, _) => { + hir::ExprClosure(.., ref block, _) => { block.id } _ => { @@ -439,7 +442,7 @@ impl<'a, 'tcx> LoanPath<'tcx> { tcx.region_maps.node_extent(block_id) } LpDowncast(ref base, _) | - LpExtend(ref base, _, _) => base.kill_scope(tcx), + LpExtend(ref base, ..) 
=> base.kill_scope(tcx), } } @@ -461,7 +464,7 @@ impl<'a, 'tcx> LoanPath<'tcx> { fn depth(&self) -> usize { match self.kind { LpExtend(ref base, _, LpDeref(_)) => base.depth(), - LpExtend(ref base, _, LpInterior(_, _)) => base.depth() + 1, + LpExtend(ref base, _, LpInterior(..)) => base.depth() + 1, _ => 0, } } @@ -474,8 +477,6 @@ impl<'a, 'tcx> LoanPath<'tcx> { base.common(&base2).map(|x| { let xd = x.depth(); if base.depth() == xd && base2.depth() == xd { - assert_eq!(base.ty, base2.ty); - assert_eq!(self.ty, other.ty); LoanPath { kind: LpExtend(Rc::new(x), a, LpInterior(opt_variant_id, id)), ty: self.ty, @@ -492,7 +493,6 @@ impl<'a, 'tcx> LoanPath<'tcx> { (_, &LpExtend(ref other, _, LpDeref(_))) => self.common(&other), (&LpVar(id), &LpVar(id2)) => { if id == id2 { - assert_eq!(self.ty, other.ty); Some(LoanPath { kind: LpVar(id), ty: self.ty }) } else { None @@ -500,7 +500,6 @@ impl<'a, 'tcx> LoanPath<'tcx> { } (&LpUpvar(id), &LpUpvar(id2)) => { if id == id2 { - assert_eq!(self.ty, other.ty); Some(LoanPath { kind: LpUpvar(id), ty: self.ty }) } else { None @@ -564,10 +563,11 @@ pub fn opt_loan_path<'tcx>(cmt: &mc::cmt<'tcx>) -> Option>> { // Errors that can occur #[derive(PartialEq)] -pub enum bckerr_code { +pub enum bckerr_code<'tcx> { err_mutbl, - err_out_of_scope(ty::Region, ty::Region), // superscope, subscope - err_borrowed_pointer_too_short(ty::Region, ty::Region), // loan, ptr + /// superscope, subscope, loan cause + err_out_of_scope(&'tcx ty::Region, &'tcx ty::Region, euv::LoanCause), + err_borrowed_pointer_too_short(&'tcx ty::Region, &'tcx ty::Region), // loan, ptr } // Combination of an error code and the categorization of the expression @@ -577,7 +577,7 @@ pub struct BckError<'tcx> { span: Span, cause: AliasableViolationKind, cmt: mc::cmt<'tcx>, - code: bckerr_code + code: bckerr_code<'tcx> } #[derive(Copy, Clone, Debug, PartialEq)] @@ -605,7 +605,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { self.free_region_map = old_free_region_map; } - pub fn is_subregion_of(&self, r_sub: ty::Region, r_sup: ty::Region) + pub fn is_subregion_of(&self, r_sub: &'tcx ty::Region, r_sup: &'tcx ty::Region) -> bool { self.free_region_map.is_subregion_of(self.tcx, r_sub, r_sup) @@ -614,9 +614,9 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { pub fn report(&self, err: BckError<'tcx>) { // Catch and handle some particular cases. 
match (&err.code, &err.cause) { - (&err_out_of_scope(ty::ReScope(_), ty::ReStatic), + (&err_out_of_scope(&ty::ReScope(_), &ty::ReStatic, _), &BorrowViolation(euv::ClosureCapture(span))) | - (&err_out_of_scope(ty::ReScope(_), ty::ReFree(..)), + (&err_out_of_scope(&ty::ReScope(_), &ty::ReFree(..), _), &BorrowViolation(euv::ClosureCapture(span))) => { return self.report_out_of_scope_escaping_closure_capture(&err, span); } @@ -711,7 +711,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { move_data::Captured => (match self.tcx.map.expect_expr(the_move.id).node { - hir::ExprClosure(_, _, _, fn_decl_span) => fn_decl_span, + hir::ExprClosure(.., fn_decl_span) => fn_decl_span, ref r => bug!("Captured({}) maps to non-closure: {:?}", the_move.id, r), }, " (into closure)"), @@ -914,14 +914,18 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } mc::AliasableStatic | mc::AliasableStaticMut => { - struct_span_err!( + let mut err = struct_span_err!( self.tcx.sess, span, E0388, - "{} in a static location", prefix) + "{} in a static location", prefix); + err.span_label(span, &format!("cannot write data in a static definition")); + err } mc::AliasableBorrowed => { - struct_span_err!( + let mut e = struct_span_err!( self.tcx.sess, span, E0389, - "{} in a `&` reference", prefix) + "{} in a `&` reference", prefix); + e.span_label(span, &"assignment into an immutable reference"); + e } }; @@ -963,6 +967,22 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { .emit(); } + fn region_end_span(&self, region: &'tcx ty::Region) -> Option { + match *region { + ty::ReScope(scope) => { + match scope.span(&self.tcx.region_maps, &self.tcx.map) { + Some(s) => { + Some(s.end_point()) + } + None => { + None + } + } + } + _ => None + } + } + pub fn note_and_explain_bckerr(&self, db: &mut DiagnosticBuilder, err: BckError<'tcx>, error_span: Span) { let code = err.code; @@ -1003,21 +1023,73 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } } - err_out_of_scope(super_scope, sub_scope) => { - self.tcx.note_and_explain_region( - db, - "reference must be valid for ", - sub_scope, - "..."); - self.tcx.note_and_explain_region( - db, - "...but borrowed value is only valid for ", - super_scope, - ""); - if let Some(span) = statement_scope_span(self.tcx, super_scope) { - db.span_label(error_span, &format!("does not live long enough")); - db.span_help(span, - "consider using a `let` binding to increase its lifetime"); + err_out_of_scope(super_scope, sub_scope, cause) => { + let (value_kind, value_msg) = match err.cmt.cat { + mc::Categorization::Rvalue(_) => + ("temporary value", "temporary value created here"), + _ => + ("borrowed value", "does not live long enough") + }; + match cause { + euv::ClosureCapture(s) => { + // The primary span starts out as the closure creation point. + // Change the primary span here to highlight the use of the variable + // in the closure, because it seems more natural. Highlight + // closure creation point as a secondary span. 
+ match db.span.primary_span() { + Some(primary) => { + db.span = MultiSpan::from_span(s); + db.span_label(primary, &format!("capture occurs here")); + db.span_label(s, &value_msg); + } + None => () + } + } + _ => { + db.span_label(error_span, &value_msg); + } + } + + let sub_span = self.region_end_span(sub_scope); + let super_span = self.region_end_span(super_scope); + + match (sub_span, super_span) { + (Some(s1), Some(s2)) if s1 == s2 => { + db.span_label(s1, &format!("{} dropped before borrower", value_kind)); + db.note("values in a scope are dropped in the opposite order \ + they are created"); + } + _ => { + match sub_span { + Some(s) => { + db.span_label(s, &format!("{} needs to live until here", + value_kind)); + } + None => { + self.tcx.note_and_explain_region( + db, + "borrowed value must be valid for ", + sub_scope, + "..."); + } + } + match super_span { + Some(s) => { + db.span_label(s, &format!("{} only lives until here", value_kind)); + } + None => { + self.tcx.note_and_explain_region( + db, + "...but borrowed value is only valid for ", + super_scope, + ""); + } + } + } + } + + if let Some(_) = statement_scope_span(self.tcx, super_scope) { + db.note("consider using a `let` binding to increase its lifetime"); } } @@ -1060,7 +1132,6 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { out.push(')'); } - LpExtend(ref lp_base, _, LpInterior(_, InteriorField(fname))) => { self.append_autoderefd_loan_path_to_string(&lp_base, out); match fname { @@ -1106,7 +1177,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { out.push(')'); } - LpVar(..) | LpUpvar(..) | LpExtend(_, _, LpInterior(..)) => { + LpVar(..) | LpUpvar(..) | LpExtend(.., LpInterior(..)) => { self.append_loan_path_to_string(loan_path, out) } } @@ -1130,8 +1201,8 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } } -fn statement_scope_span(tcx: TyCtxt, region: ty::Region) -> Option { - match region { +fn statement_scope_span(tcx: TyCtxt, region: &ty::Region) -> Option { + match *region { ty::ReScope(scope) => { match tcx.map.find(scope.node_id(&tcx.region_maps)) { Some(hir_map::NodeStmt(stmt)) => Some(stmt.span), diff --git a/src/librustc_borrowck/borrowck/move_data.rs b/src/librustc_borrowck/borrowck/move_data.rs index c9822a4fee..e9ba406389 100644 --- a/src/librustc_borrowck/borrowck/move_data.rs +++ b/src/librustc_borrowck/borrowck/move_data.rs @@ -21,7 +21,8 @@ use rustc::middle::dataflow::DataFlowOperator; use rustc::middle::dataflow::KillFrom; use rustc::middle::expr_use_visitor as euv; use rustc::middle::expr_use_visitor::MutateMode; -use rustc::ty::TyCtxt; +use rustc::middle::mem_categorization as mc; +use rustc::ty::{self, TyCtxt}; use rustc::util::nodemap::{FnvHashMap, NodeSet}; use std::cell::RefCell; @@ -196,7 +197,7 @@ fn loan_path_is_precise(loan_path: &LoanPath) -> bool { LpVar(_) | LpUpvar(_) => { true } - LpExtend(_, _, LpInterior(_, InteriorKind::InteriorElement(..))) => { + LpExtend(.., LpInterior(_, InteriorKind::InteriorElement(..))) => { // Paths involving element accesses a[i] do not refer to a unique // location, as there is no accurate tracking of the indices. // @@ -206,7 +207,7 @@ fn loan_path_is_precise(loan_path: &LoanPath) -> bool { false } LpDowncast(ref lp_base, _) | - LpExtend(ref lp_base, _, _) => { + LpExtend(ref lp_base, ..) => { loan_path_is_precise(&lp_base) } } @@ -294,7 +295,7 @@ impl<'a, 'tcx> MoveData<'tcx> { } LpDowncast(ref base, _) | - LpExtend(ref base, _, _) => { + LpExtend(ref base, ..) 
=> { let parent_index = self.move_path(tcx, base.clone()); let index = MovePathIndex(self.paths.borrow().len()); @@ -350,7 +351,7 @@ impl<'a, 'tcx> MoveData<'tcx> { match lp.kind { LpVar(..) | LpUpvar(..) => { } LpDowncast(ref b, _) | - LpExtend(ref b, _, _) => { + LpExtend(ref b, ..) => { self.add_existing_base_paths(b, result); } } @@ -364,6 +365,34 @@ impl<'a, 'tcx> MoveData<'tcx> { lp: Rc>, id: ast::NodeId, kind: MoveKind) { + // Moving one union field automatically moves all its fields. + if let LpExtend(ref base_lp, mutbl, LpInterior(opt_variant_id, interior)) = lp.kind { + if let ty::TyAdt(adt_def, _) = base_lp.ty.sty { + if adt_def.is_union() { + for field in &adt_def.struct_variant().fields { + let field = InteriorKind::InteriorField(mc::NamedField(field.name)); + let field_ty = if field == interior { + lp.ty + } else { + tcx.types.err // Doesn't matter + }; + let sibling_lp_kind = LpExtend(base_lp.clone(), mutbl, + LpInterior(opt_variant_id, field)); + let sibling_lp = Rc::new(LoanPath::new(sibling_lp_kind, field_ty)); + self.add_move_helper(tcx, sibling_lp, id, kind); + } + return; + } + } + } + + self.add_move_helper(tcx, lp.clone(), id, kind); + } + + fn add_move_helper(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + lp: Rc>, + id: ast::NodeId, + kind: MoveKind) { debug!("add_move(lp={:?}, id={}, kind={:?})", lp, id, @@ -393,6 +422,37 @@ impl<'a, 'tcx> MoveData<'tcx> { span: Span, assignee_id: ast::NodeId, mode: euv::MutateMode) { + // Assigning to one union field automatically assigns to all its fields. + if let LpExtend(ref base_lp, mutbl, LpInterior(opt_variant_id, interior)) = lp.kind { + if let ty::TyAdt(adt_def, _) = base_lp.ty.sty { + if adt_def.is_union() { + for field in &adt_def.struct_variant().fields { + let field = InteriorKind::InteriorField(mc::NamedField(field.name)); + let field_ty = if field == interior { + lp.ty + } else { + tcx.types.err // Doesn't matter + }; + let sibling_lp_kind = LpExtend(base_lp.clone(), mutbl, + LpInterior(opt_variant_id, field)); + let sibling_lp = Rc::new(LoanPath::new(sibling_lp_kind, field_ty)); + self.add_assignment_helper(tcx, sibling_lp, assign_id, + span, assignee_id, mode); + } + return; + } + } + } + + self.add_assignment_helper(tcx, lp.clone(), assign_id, span, assignee_id, mode); + } + + fn add_assignment_helper(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + lp: Rc>, + assign_id: ast::NodeId, + span: Span, + assignee_id: ast::NodeId, + mode: euv::MutateMode) { debug!("add_assignment(lp={:?}, assign_id={}, assignee_id={}", lp, assign_id, assignee_id); diff --git a/src/librustc_borrowck/lib.rs b/src/librustc_borrowck/lib.rs index 16fefee347..d3ab9c9318 100644 --- a/src/librustc_borrowck/lib.rs +++ b/src/librustc_borrowck/lib.rs @@ -19,13 +19,14 @@ #![allow(non_camel_case_types)] +#![feature(dotdot_in_tuple_patterns)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] #![feature(staged_api)] #![feature(associated_consts)] #![feature(nonzero)] -#![feature(question_mark)] +#![cfg_attr(stage0, feature(question_mark))] #[macro_use] extern crate log; #[macro_use] extern crate syntax; extern crate syntax_pos; diff --git a/src/librustc_const_eval/check_match.rs b/src/librustc_const_eval/check_match.rs index bf6ebcb5ef..eb74936d8c 100644 --- a/src/librustc_const_eval/check_match.rs +++ b/src/librustc_const_eval/check_match.rs @@ -26,8 +26,7 @@ use rustc::middle::expr_use_visitor as euv; use rustc::middle::mem_categorization::{cmt}; use rustc::hir::pat_util::*; use rustc::traits::Reveal; -use rustc::ty::*; -use rustc::ty; 
+use rustc::ty::{self, Ty, TyCtxt}; use std::cmp::Ordering; use std::fmt; use std::iter::{FromIterator, IntoIterator, repeat}; @@ -40,11 +39,10 @@ use rustc_back::slice; use syntax::ast::{self, DUMMY_NODE_ID, NodeId}; use syntax::codemap::Spanned; use syntax_pos::{Span, DUMMY_SP}; -use rustc::hir::fold::{Folder, noop_fold_pat}; use rustc::hir::print::pat_to_string; use syntax::ptr::P; +use syntax::util::move_map::MoveMap; use rustc::util::common::ErrorReported; -use rustc::util::nodemap::FnvHashMap; pub const DUMMY_WILD_PAT: &'static Pat = &Pat { id: DUMMY_NODE_ID, @@ -111,7 +109,7 @@ impl<'a, 'tcx> FromIterator>)>> for Matrix<'a, 'tc //NOTE: appears to be the only place other then InferCtxt to contain a ParamEnv pub struct MatchCheckCtxt<'a, 'tcx: 'a> { pub tcx: TyCtxt<'a, 'tcx, 'tcx>, - pub param_env: ParameterEnvironment<'tcx>, + pub param_env: ty::ParameterEnvironment<'tcx>, } #[derive(Clone, Debug, PartialEq)] @@ -182,7 +180,7 @@ fn check_expr(cx: &mut MatchCheckCtxt, ex: &hir::Expr) { } } - let mut static_inliner = StaticInliner::new(cx.tcx, None); + let mut static_inliner = StaticInliner::new(cx.tcx); let inlined_arms = arms.iter().map(|arm| { (arm.pats.iter().map(|pat| { static_inliner.fold_pat((*pat).clone()) @@ -245,21 +243,23 @@ fn check_for_bindings_named_the_same_as_variants(cx: &MatchCheckCtxt, pat: &Pat) pat.walk(|p| { if let PatKind::Binding(hir::BindByValue(hir::MutImmutable), name, None) = p.node { let pat_ty = cx.tcx.pat_ty(p); - if let ty::TyEnum(edef, _) = pat_ty.sty { - if let Def::Local(..) = cx.tcx.expect_def(p.id) { - if edef.variants.iter().any(|variant| { - variant.name == name.node && variant.kind == VariantKind::Unit - }) { - let ty_path = cx.tcx.item_path_str(edef.did); - let mut err = struct_span_warn!(cx.tcx.sess, p.span, E0170, - "pattern binding `{}` is named the same as one \ - of the variants of the type `{}`", - name.node, ty_path); - help!(err, - "if you meant to match on a variant, \ - consider making the path in the pattern qualified: `{}::{}`", - ty_path, name.node); - err.emit(); + if let ty::TyAdt(edef, _) = pat_ty.sty { + if edef.is_enum() { + if let Def::Local(..) = cx.tcx.expect_def(p.id) { + if edef.variants.iter().any(|variant| { + variant.name == name.node && variant.kind == ty::VariantKind::Unit + }) { + let ty_path = cx.tcx.item_path_str(edef.did); + let mut err = struct_span_warn!(cx.tcx.sess, p.span, E0170, + "pattern binding `{}` is named the same as one \ + of the variants of the type `{}`", + name.node, ty_path); + help!(err, + "if you meant to match on a variant, \ + consider making the path in the pattern qualified: `{}::{}`", + ty_path, name.node); + err.emit(); + } } } } @@ -324,7 +324,10 @@ fn check_arms(cx: &MatchCheckCtxt, let &(ref first_arm_pats, _) = &arms[0]; let first_pat = &first_arm_pats[0]; let span = first_pat.span; - span_err!(cx.tcx.sess, span, E0165, "irrefutable while-let pattern"); + struct_span_err!(cx.tcx.sess, span, E0165, + "irrefutable while-let pattern") + .span_label(span, &format!("irrefutable pattern")) + .emit(); }, hir::MatchSource::ForLoopDesugar => { @@ -369,8 +372,8 @@ fn check_arms(cx: &MatchCheckCtxt, /// Checks for common cases of "catchall" patterns that may not be intended as such. 
fn pat_is_catchall(dm: &DefMap, p: &Pat) -> bool { match p.node { - PatKind::Binding(_, _, None) => true, - PatKind::Binding(_, _, Some(ref s)) => pat_is_catchall(dm, &s), + PatKind::Binding(.., None) => true, + PatKind::Binding(.., Some(ref s)) => pat_is_catchall(dm, &s), PatKind::Ref(ref s, _) => pat_is_catchall(dm, &s), PatKind::Tuple(ref v, _) => v.iter().all(|p| pat_is_catchall(dm, &p)), _ => false @@ -379,7 +382,7 @@ fn pat_is_catchall(dm: &DefMap, p: &Pat) -> bool { fn raw_pat(p: &Pat) -> &Pat { match p.node { - PatKind::Binding(_, _, Some(ref s)) => raw_pat(&s), + PatKind::Binding(.., Some(ref s)) => raw_pat(&s), _ => p } } @@ -405,10 +408,13 @@ fn check_exhaustive<'a, 'tcx>(cx: &MatchCheckCtxt<'a, 'tcx>, }, _ => bug!(), }; - span_err!(cx.tcx.sess, sp, E0297, + let pattern_string = pat_to_string(witness); + struct_span_err!(cx.tcx.sess, sp, E0297, "refutable pattern in `for` loop binding: \ `{}` not covered", - pat_to_string(witness)); + pattern_string) + .span_label(sp, &format!("pattern `{}` not covered", pattern_string)) + .emit(); }, _ => { let pattern_strings: Vec<_> = witnesses.iter().map(|w| { @@ -452,60 +458,37 @@ fn const_val_to_expr(value: &ConstVal) -> P { _ => bug!() }; P(hir::Expr { - id: 0, + id: DUMMY_NODE_ID, node: hir::ExprLit(P(Spanned { node: node, span: DUMMY_SP })), span: DUMMY_SP, attrs: ast::ThinVec::new(), }) } -pub struct StaticInliner<'a, 'tcx: 'a> { - pub tcx: TyCtxt<'a, 'tcx, 'tcx>, - pub failed: bool, - pub renaming_map: Option<&'a mut FnvHashMap<(NodeId, Span), NodeId>>, +struct StaticInliner<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + failed: bool } impl<'a, 'tcx> StaticInliner<'a, 'tcx> { - pub fn new<'b>(tcx: TyCtxt<'b, 'tcx, 'tcx>, - renaming_map: Option<&'b mut FnvHashMap<(NodeId, Span), NodeId>>) - -> StaticInliner<'b, 'tcx> { + pub fn new<'b>(tcx: TyCtxt<'b, 'tcx, 'tcx>) -> StaticInliner<'b, 'tcx> { StaticInliner { tcx: tcx, - failed: false, - renaming_map: renaming_map + failed: false } } } -struct RenamingRecorder<'map> { - substituted_node_id: NodeId, - origin_span: Span, - renaming_map: &'map mut FnvHashMap<(NodeId, Span), NodeId> -} - -impl<'v, 'map> Visitor<'v> for RenamingRecorder<'map> { - fn visit_id(&mut self, node_id: NodeId) { - let key = (node_id, self.origin_span); - self.renaming_map.insert(key, self.substituted_node_id); - } -} - -impl<'a, 'tcx> Folder for StaticInliner<'a, 'tcx> { +impl<'a, 'tcx> StaticInliner<'a, 'tcx> { fn fold_pat(&mut self, pat: P) -> P { - return match pat.node { + match pat.node { PatKind::Path(..) 
=> { match self.tcx.expect_def(pat.id) { Def::AssociatedConst(did) | Def::Const(did) => { let substs = Some(self.tcx.node_id_item_substs(pat.id).substs); if let Some((const_expr, _)) = lookup_const_by_id(self.tcx, did, substs) { match const_expr_to_pat(self.tcx, const_expr, pat.id, pat.span) { - Ok(new_pat) => { - if let Some(ref mut map) = self.renaming_map { - // Record any renamings we do here - record_renamings(const_expr, &pat, map); - } - new_pat - } + Ok(new_pat) => return new_pat, Err(def_id) => { self.failed = true; self.tcx.sess.span_err( @@ -513,33 +496,62 @@ impl<'a, 'tcx> Folder for StaticInliner<'a, 'tcx> { &format!("constants of the type `{}` \ cannot be used in patterns", self.tcx.item_path_str(def_id))); - pat } } } else { self.failed = true; span_err!(self.tcx.sess, pat.span, E0158, "statics cannot be referenced in patterns"); - pat } } - _ => noop_fold_pat(pat, self) + _ => {} } } - _ => noop_fold_pat(pat, self) - }; + _ => {} + } - fn record_renamings(const_expr: &hir::Expr, - substituted_pat: &hir::Pat, - renaming_map: &mut FnvHashMap<(NodeId, Span), NodeId>) { - let mut renaming_recorder = RenamingRecorder { - substituted_node_id: substituted_pat.id, - origin_span: substituted_pat.span, - renaming_map: renaming_map, + pat.map(|Pat { id, node, span }| { + let node = match node { + PatKind::Binding(binding_mode, pth1, sub) => { + PatKind::Binding(binding_mode, pth1, sub.map(|x| self.fold_pat(x))) + } + PatKind::TupleStruct(pth, pats, ddpos) => { + PatKind::TupleStruct(pth, pats.move_map(|x| self.fold_pat(x)), ddpos) + } + PatKind::Struct(pth, fields, etc) => { + let fs = fields.move_map(|f| { + Spanned { + span: f.span, + node: hir::FieldPat { + name: f.node.name, + pat: self.fold_pat(f.node.pat), + is_shorthand: f.node.is_shorthand, + }, + } + }); + PatKind::Struct(pth, fs, etc) + } + PatKind::Tuple(elts, ddpos) => { + PatKind::Tuple(elts.move_map(|x| self.fold_pat(x)), ddpos) + } + PatKind::Box(inner) => PatKind::Box(self.fold_pat(inner)), + PatKind::Ref(inner, mutbl) => PatKind::Ref(self.fold_pat(inner), mutbl), + PatKind::Vec(before, slice, after) => { + PatKind::Vec(before.move_map(|x| self.fold_pat(x)), + slice.map(|x| self.fold_pat(x)), + after.move_map(|x| self.fold_pat(x))) + } + PatKind::Wild | + PatKind::Lit(_) | + PatKind::Range(..) | + PatKind::Path(..) => node }; - - renaming_recorder.visit_expr(const_expr); - } + Pat { + id: id, + node: node, + span: span + } + }) } } @@ -563,10 +575,10 @@ fn construct_witness<'a,'tcx>(cx: &MatchCheckCtxt<'a,'tcx>, ctor: &Constructor, let pat = match left_ty.sty { ty::TyTuple(..) 
=> PatKind::Tuple(pats.collect(), None), - ty::TyEnum(adt, _) | ty::TyStruct(adt, _) => { + ty::TyAdt(adt, _) => { let v = ctor.variant_for_adt(adt); match v.kind { - VariantKind::Struct => { + ty::VariantKind::Struct => { let field_pats: hir::HirVec<_> = v.fields.iter() .zip(pats) .filter(|&(_, ref pat)| pat.node != PatKind::Wild) @@ -581,10 +593,10 @@ fn construct_witness<'a,'tcx>(cx: &MatchCheckCtxt<'a,'tcx>, ctor: &Constructor, let has_more_fields = field_pats.len() < pats_len; PatKind::Struct(def_to_path(cx.tcx, v.did), field_pats, has_more_fields) } - VariantKind::Tuple => { + ty::VariantKind::Tuple => { PatKind::TupleStruct(def_to_path(cx.tcx, v.did), pats.collect(), None) } - VariantKind::Unit => { + ty::VariantKind::Unit => { PatKind::Path(None, def_to_path(cx.tcx, v.did)) } } @@ -617,7 +629,7 @@ fn construct_witness<'a,'tcx>(cx: &MatchCheckCtxt<'a,'tcx>, ctor: &Constructor, }; P(hir::Pat { - id: 0, + id: DUMMY_NODE_ID, node: pat, span: DUMMY_SP }) @@ -626,7 +638,7 @@ fn construct_witness<'a,'tcx>(cx: &MatchCheckCtxt<'a,'tcx>, ctor: &Constructor, impl Constructor { fn variant_for_adt<'tcx, 'container, 'a>(&self, adt: &'a ty::AdtDefData<'tcx, 'container>) - -> &'a VariantDefData<'tcx, 'container> { + -> &'a ty::VariantDefData<'tcx, 'container> { match self { &Variant(vid) => adt.variant_with_id(vid), _ => adt.struct_variant() @@ -656,7 +668,8 @@ fn all_constructors(_cx: &MatchCheckCtxt, left_ty: Ty, [true, false].iter().map(|b| ConstantValue(ConstVal::Bool(*b))).collect(), ty::TySlice(_) => (0..max_slice_length+1).map(|length| Slice(length)).collect(), - ty::TyEnum(def, _) => def.variants.iter().map(|v| Variant(v.did)).collect(), + ty::TyAdt(def, _) if def.is_enum() => + def.variants.iter().map(|v| Variant(v.did)).collect(), _ => vec![Single] } } @@ -788,8 +801,9 @@ fn pat_constructors(cx: &MatchCheckCtxt, p: &Pat, match pat.node { PatKind::Struct(..) | PatKind::TupleStruct(..) | PatKind::Path(..) => match cx.tcx.expect_def(pat.id) { - Def::Variant(_, id) => vec![Variant(id)], - Def::Struct(..) | Def::TyAlias(..) | Def::AssociatedTy(..) => vec![Single], + Def::Variant(id) => vec![Variant(id)], + Def::Struct(..) | Def::Union(..) | + Def::TyAlias(..) | Def::AssociatedTy(..) => vec![Single], Def::Const(..) | Def::AssociatedConst(..) => span_bug!(pat.span, "const pattern should've been rewritten"), def => span_bug!(pat.span, "pat_constructors: unexpected definition {:?}", def), @@ -800,7 +814,7 @@ fn pat_constructors(cx: &MatchCheckCtxt, p: &Pat, vec![ConstantRange(eval_const_expr(cx.tcx, &lo), eval_const_expr(cx.tcx, &hi))], PatKind::Vec(ref before, ref slice, ref after) => match left_ty.sty { - ty::TyArray(_, _) => vec![Single], + ty::TyArray(..) => vec![Single], ty::TySlice(_) if slice.is_some() => { (before.len() + after.len()..max_slice_length+1) .map(|length| Slice(length)) @@ -833,7 +847,7 @@ pub fn constructor_arity(_cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> us _ => bug!() }, ty::TyRef(..) => 1, - ty::TyEnum(adt, _) | ty::TyStruct(adt, _) => { + ty::TyAdt(adt, _) => { ctor.variant_for_adt(adt).fields.len() } ty::TyArray(_, n) => n, @@ -862,8 +876,8 @@ fn wrap_pat<'a, 'b, 'tcx>(cx: &MatchCheckCtxt<'b, 'tcx>, { let pat_ty = cx.tcx.pat_ty(pat); (pat, Some(match pat.node { - PatKind::Binding(hir::BindByRef(..), _, _) => { - pat_ty.builtin_deref(false, NoPreference).unwrap().ty + PatKind::Binding(hir::BindByRef(..), ..) => { + pat_ty.builtin_deref(false, ty::NoPreference).unwrap().ty } _ => pat_ty })) @@ -899,7 +913,7 @@ pub fn specialize<'a, 'b, 'tcx>( Def::Const(..) 
| Def::AssociatedConst(..) => span_bug!(pat_span, "const pattern should've \ been rewritten"), - Def::Variant(_, id) if *constructor != Variant(id) => None, + Def::Variant(id) if *constructor != Variant(id) => None, Def::Variant(..) | Def::Struct(..) => Some(Vec::new()), def => span_bug!(pat_span, "specialize: unexpected \ definition {:?}", def), @@ -911,7 +925,7 @@ pub fn specialize<'a, 'b, 'tcx>( Def::Const(..) | Def::AssociatedConst(..) => span_bug!(pat_span, "const pattern should've \ been rewritten"), - Def::Variant(_, id) if *constructor != Variant(id) => None, + Def::Variant(id) if *constructor != Variant(id) => None, Def::Variant(..) | Def::Struct(..) => { match ddpos { Some(ddpos) => { @@ -1037,7 +1051,7 @@ pub fn specialize<'a, 'b, 'tcx>( fn check_local(cx: &mut MatchCheckCtxt, loc: &hir::Local) { intravisit::walk_local(cx, loc); - let pat = StaticInliner::new(cx.tcx, None).fold_pat(loc.pat.clone()); + let pat = StaticInliner::new(cx.tcx).fold_pat(loc.pat.clone()); check_irrefutable(cx, &pat, false); // Check legality of move bindings and `@` patterns. @@ -1053,7 +1067,7 @@ fn check_fn(cx: &mut MatchCheckCtxt, fn_id: NodeId) { match kind { FnKind::Closure(_) => {} - _ => cx.param_env = ParameterEnvironment::for_item(cx.tcx, fn_id), + _ => cx.param_env = ty::ParameterEnvironment::for_item(cx.tcx, fn_id), } intravisit::walk_fn(cx, kind, decl, body, sp, fn_id); @@ -1073,11 +1087,12 @@ fn check_irrefutable(cx: &MatchCheckCtxt, pat: &Pat, is_fn_arg: bool) { }; is_refutable(cx, pat, |uncovered_pat| { - span_err!(cx.tcx.sess, pat.span, E0005, + let pattern_string = pat_to_string(uncovered_pat); + struct_span_err!(cx.tcx.sess, pat.span, E0005, "refutable pattern in {}: `{}` not covered", origin, - pat_to_string(uncovered_pat), - ); + pattern_string, + ).span_label(pat.span, &format!("pattern `{}` not covered", pattern_string)).emit(); }); } @@ -1120,10 +1135,11 @@ fn check_legality_of_move_bindings(cx: &MatchCheckCtxt, .span_label(p.span, &format!("moves value into pattern guard")) .emit(); } else if by_ref_span.is_some() { - let mut err = struct_span_err!(cx.tcx.sess, p.span, E0009, - "cannot bind by-move and by-ref in the same pattern"); - span_note!(&mut err, by_ref_span.unwrap(), "by-ref binding occurs here"); - err.emit(); + struct_span_err!(cx.tcx.sess, p.span, E0009, + "cannot bind by-move and by-ref in the same pattern") + .span_label(p.span, &format!("by-move pattern here")) + .span_label(by_ref_span.unwrap(), &format!("both by-ref and by-move used")) + .emit(); } }; @@ -1170,17 +1186,17 @@ impl<'a, 'gcx, 'tcx> Delegate<'tcx> for MutationChecker<'a, 'gcx> { _: NodeId, span: Span, _: cmt, - _: Region, - kind: BorrowKind, + _: &'tcx ty::Region, + kind:ty:: BorrowKind, _: LoanCause) { match kind { - MutBorrow => { + ty::MutBorrow => { struct_span_err!(self.cx.tcx.sess, span, E0301, "cannot mutably borrow in a pattern guard") .span_label(span, &format!("borrowed mutably in pattern guard")) .emit(); } - ImmBorrow | UniqueImmBorrow => {} + ty::ImmBorrow | ty::UniqueImmBorrow => {} } } fn decl_without_init(&mut self, _: NodeId, _: Span) {} @@ -1211,7 +1227,7 @@ struct AtBindingPatternVisitor<'a, 'b:'a, 'tcx:'b> { impl<'a, 'b, 'tcx, 'v> Visitor<'v> for AtBindingPatternVisitor<'a, 'b, 'tcx> { fn visit_pat(&mut self, pat: &Pat) { match pat.node { - PatKind::Binding(_, _, ref subpat) => { + PatKind::Binding(.., ref subpat) => { if !self.bindings_allowed { span_err!(self.cx.tcx.sess, pat.span, E0303, "pattern bindings are not allowed after an `@`"); diff --git 
a/src/librustc_const_eval/eval.rs b/src/librustc_const_eval/eval.rs index 73b54c4374..4ae3c7d37d 100644 --- a/src/librustc_const_eval/eval.rs +++ b/src/librustc_const_eval/eval.rs @@ -22,8 +22,9 @@ use rustc::traits; use rustc::hir::def::{Def, PathResolution}; use rustc::hir::def_id::DefId; use rustc::hir::pat_util::def_to_path; -use rustc::ty::{self, Ty, TyCtxt, subst}; +use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::util::IntTypeExt; +use rustc::ty::subst::Substs; use rustc::traits::Reveal; use rustc::util::common::ErrorReported; use rustc::util::nodemap::NodeMap; @@ -56,7 +57,6 @@ macro_rules! math { } fn lookup_variant_by_id<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - enum_def: DefId, variant_def: DefId) -> Option<&'tcx Expr> { fn variant_expr<'a>(variants: &'a [hir::Variant], id: ast::NodeId) @@ -69,8 +69,8 @@ fn lookup_variant_by_id<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, None } - if let Some(enum_node_id) = tcx.map.as_local_node_id(enum_def) { - let variant_node_id = tcx.map.as_local_node_id(variant_def).unwrap(); + if let Some(variant_node_id) = tcx.map.as_local_node_id(variant_def) { + let enum_node_id = tcx.map.get_parent(variant_node_id); match tcx.map.find(enum_node_id) { None => None, Some(ast_map::NodeItem(it)) => match it.node { @@ -93,7 +93,7 @@ fn lookup_variant_by_id<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, /// This generally happens in late/trans const evaluation. pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, - substs: Option<&'tcx subst::Substs<'tcx>>) + substs: Option<&'tcx Substs<'tcx>>) -> Option<(&'tcx Expr, Option>)> { if let Some(node_id) = tcx.map.as_local_node_id(def_id) { match tcx.map.find(node_id) { @@ -105,12 +105,13 @@ pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, _ => None }, Some(ast_map::NodeTraitItem(ti)) => match ti.node { - hir::ConstTraitItem(_, _) => { + hir::ConstTraitItem(..) => { if let Some(substs) = substs { // If we have a trait item and the substitutions for it, // `resolve_trait_associated_const` will select an impl // or the default. - let trait_id = tcx.trait_of_item(def_id).unwrap(); + let trait_id = tcx.map.get_parent(node_id); + let trait_id = tcx.map.local_def_id(trait_id); resolve_trait_associated_const(tcx, ti, trait_id, substs) } else { // Technically, without knowing anything about the @@ -149,7 +150,7 @@ pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, _ => None }, Some((&InlinedItem::TraitItem(trait_id, ref ti), _)) => match ti.node { - hir::ConstTraitItem(_, _) => { + hir::ConstTraitItem(..) => { used_substs = true; if let Some(substs) = substs { // As mentioned in the comments above for in-crate @@ -226,10 +227,10 @@ pub fn lookup_const_fn_by_id<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefI }; match fn_like.kind() { - FnKind::ItemFn(_, _, _, hir::Constness::Const, _, _, _) => { + FnKind::ItemFn(_, _, _, hir::Constness::Const, ..) => { Some(fn_like) } - FnKind::Method(_, m, _, _) => { + FnKind::Method(_, m, ..) 
=> { if m.constness == hir::Constness::Const { Some(fn_like) } else { @@ -255,8 +256,11 @@ pub fn const_expr_to_pat<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, span, format!("floating point constants cannot be used in patterns")); } - ty::TyEnum(adt_def, _) | - ty::TyStruct(adt_def, _) => { + ty::TyAdt(adt_def, _) if adt_def.is_union() => { + // Matching on union fields is unsafe, we can't hide it in constants + tcx.sess.span_err(span, "cannot use unions in constant patterns"); + } + ty::TyAdt(adt_def, _) => { if !tcx.has_attr(adt_def.did, "structural_match") { tcx.sess.add_lint( lint::builtin::ILLEGAL_STRUCT_OR_ENUM_CONSTANT_PATTERN, @@ -273,9 +277,9 @@ pub fn const_expr_to_pat<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } let pat = match expr.node { hir::ExprTup(ref exprs) => - PatKind::Tuple(try!(exprs.iter() - .map(|expr| const_expr_to_pat(tcx, &expr, pat_id, span)) - .collect()), None), + PatKind::Tuple(exprs.iter() + .map(|expr| const_expr_to_pat(tcx, &expr, pat_id, span)) + .collect::>()?, None), hir::ExprCall(ref callee, ref args) => { let def = tcx.expect_def(callee.id); @@ -284,7 +288,7 @@ pub fn const_expr_to_pat<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } let path = match def { Def::Struct(def_id) => def_to_path(tcx, def_id), - Def::Variant(_, variant_did) => def_to_path(tcx, variant_did), + Def::Variant(variant_did) => def_to_path(tcx, variant_did), Def::Fn(..) | Def::Method(..) => return Ok(P(hir::Pat { id: expr.id, node: PatKind::Lit(P(expr.clone())), @@ -292,34 +296,31 @@ pub fn const_expr_to_pat<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, })), _ => bug!() }; - let pats = try!(args.iter() - .map(|expr| const_expr_to_pat(tcx, &**expr, - pat_id, span)) - .collect()); + let pats = args.iter() + .map(|expr| const_expr_to_pat(tcx, &**expr, pat_id, span)) + .collect::>()?; PatKind::TupleStruct(path, pats, None) } hir::ExprStruct(ref path, ref fields, None) => { let field_pats = - try!(fields.iter() - .map(|field| Ok(codemap::Spanned { - span: syntax_pos::DUMMY_SP, - node: hir::FieldPat { - name: field.name.node, - pat: try!(const_expr_to_pat(tcx, &field.expr, - pat_id, span)), - is_shorthand: false, - }, - })) - .collect()); + fields.iter() + .map(|field| Ok(codemap::Spanned { + span: syntax_pos::DUMMY_SP, + node: hir::FieldPat { + name: field.name.node, + pat: const_expr_to_pat(tcx, &field.expr, pat_id, span)?, + is_shorthand: false, + }, + })) + .collect::>()?; PatKind::Struct(path.clone(), field_pats, false) } hir::ExprVec(ref exprs) => { - let pats = try!(exprs.iter() - .map(|expr| const_expr_to_pat(tcx, &expr, - pat_id, span)) - .collect()); + let pats = exprs.iter() + .map(|expr| const_expr_to_pat(tcx, &expr, pat_id, span)) + .collect::>()?; PatKind::Vec(pats, None, hir::HirVec::new()) } @@ -806,8 +807,8 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, signal!(e, NonConstPath); } }, - Def::Variant(enum_def, variant_def) => { - if let Some(const_expr) = lookup_variant_by_id(tcx, enum_def, variant_def) { + Def::Variant(variant_def) => { + if let Some(const_expr) = lookup_variant_by_id(tcx, variant_def) { match eval_const_expr_partial(tcx, const_expr, ty_hint, None) { Ok(val) => val, Err(err) => { @@ -822,7 +823,8 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, Def::Struct(..) 
=> { ConstVal::Struct(e.id) } - Def::Local(_, id) => { + Def::Local(def_id) => { + let id = tcx.map.as_local_node_id(def_id).unwrap(); debug!("Def::Local({:?}): {:?}", id, fn_args); if let Some(val) = fn_args.and_then(|args| args.get(&id)) { val.clone() @@ -866,7 +868,7 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, debug!("const call({:?})", call_args); eval_const_expr_partial(tcx, &result, ty_hint, Some(&call_args))? }, - hir::ExprLit(ref lit) => match lit_to_const(&lit.node, tcx, ety, lit.span) { + hir::ExprLit(ref lit) => match lit_to_const(&lit.node, tcx, ety) { Ok(val) => val, Err(err) => signal!(e, err), }, @@ -1033,7 +1035,7 @@ fn infer<'a, 'tcx>(i: ConstInt, (&ty::TyInt(ity), i) => Err(TypeMismatch(ity.to_string(), i)), (&ty::TyUint(ity), i) => Err(TypeMismatch(ity.to_string(), i)), - (&ty::TyEnum(ref adt, _), i) => { + (&ty::TyAdt(adt, _), i) if adt.is_enum() => { let hints = tcx.lookup_repr_hints(adt.did); let int_ty = tcx.enum_repr_type(hints.iter().next()); infer(i, tcx, &int_ty.to_ty(tcx).sty) @@ -1045,16 +1047,14 @@ fn infer<'a, 'tcx>(i: ConstInt, fn resolve_trait_associated_const<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ti: &'tcx hir::TraitItem, trait_id: DefId, - rcvr_substs: &'tcx subst::Substs<'tcx>) + rcvr_substs: &'tcx Substs<'tcx>) -> Option<(&'tcx Expr, Option>)> { - let trait_ref = ty::Binder( - rcvr_substs.clone().erase_regions().to_trait_ref(tcx, trait_id) - ); + let trait_ref = ty::Binder(ty::TraitRef::new(trait_id, rcvr_substs)); debug!("resolve_trait_associated_const: trait_ref={:?}", trait_ref); - tcx.populate_implementations_for_trait_if_necessary(trait_ref.def_id()); + tcx.populate_implementations_for_trait_if_necessary(trait_id); tcx.infer_ctxt(None, None, Reveal::NotSpecializable).enter(|infcx| { let mut selcx = traits::SelectionContext::new(&infcx); let obligation = traits::Obligation::new(traits::ObligationCause::dummy(), @@ -1079,8 +1079,14 @@ fn resolve_trait_associated_const<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // when constructing the inference context above. 
match selection { traits::VtableImpl(ref impl_data) => { - match tcx.associated_consts(impl_data.impl_def_id) - .iter().find(|ic| ic.name == ti.name) { + let ac = tcx.impl_or_trait_items(impl_data.impl_def_id) + .iter().filter_map(|&def_id| { + match tcx.impl_or_trait_item(def_id) { + ty::ConstTraitItem(ic) => Some(ic), + _ => None + } + }).find(|ic| ic.name == ti.name); + match ac { Some(ic) => lookup_const_by_id(tcx, ic.def_id, None), None => match ti.node { hir::ConstTraitItem(ref ty, Some(ref expr)) => { @@ -1204,8 +1210,7 @@ fn cast_const<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, val: ConstVal, ty: ty::Ty) fn lit_to_const<'a, 'tcx>(lit: &ast::LitKind, tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty_hint: Option>, - span: Span) + ty_hint: Option>) -> Result { use syntax::ast::*; use syntax::ast::LitIntType::*; @@ -1226,7 +1231,7 @@ fn lit_to_const<'a, 'tcx>(lit: &ast::LitKind, infer(Infer(n), tcx, &ty::TyUint(uty)).map(Integral) }, None => Ok(Integral(Infer(n))), - Some(&ty::TyEnum(ref adt, _)) => { + Some(&ty::TyAdt(adt, _)) => { let hints = tcx.lookup_repr_hints(adt.did); let int_ty = tcx.enum_repr_type(hints.iter().next()); infer(Infer(n), tcx, &int_ty.to_ty(tcx).sty).map(Integral) @@ -1239,21 +1244,22 @@ fn lit_to_const<'a, 'tcx>(lit: &ast::LitKind, }, LitKind::Float(ref n, fty) => { - Ok(Float(parse_float(n, Some(fty), span))) + parse_float(n, Some(fty)).map(Float) } LitKind::FloatUnsuffixed(ref n) => { let fty_hint = match ty_hint.map(|t| &t.sty) { Some(&ty::TyFloat(fty)) => Some(fty), _ => None }; - Ok(Float(parse_float(n, fty_hint, span))) + parse_float(n, fty_hint).map(Float) } LitKind::Bool(b) => Ok(Bool(b)), LitKind::Char(c) => Ok(Char(c)), } } -fn parse_float(num: &str, fty_hint: Option, span: Span) -> ConstFloat { +fn parse_float(num: &str, fty_hint: Option) + -> Result { let val = match fty_hint { Some(ast::FloatTy::F32) => num.parse::().map(F32), Some(ast::FloatTy::F64) => num.parse::().map(F64), @@ -1265,9 +1271,9 @@ fn parse_float(num: &str, fty_hint: Option, span: Span) -> ConstFl }) } }; - val.unwrap_or_else(|_| { + val.map_err(|_| { // FIXME(#31407) this is only necessary because float parsing is buggy - span_bug!(span, "could not evaluate float literal (see issue #31407)"); + UnimplementedConstVal("could not evaluate float literal (see issue #31407)") }) } diff --git a/src/librustc_const_eval/lib.rs b/src/librustc_const_eval/lib.rs index a6714c178e..7b40269ba5 100644 --- a/src/librustc_const_eval/lib.rs +++ b/src/librustc_const_eval/lib.rs @@ -22,12 +22,12 @@ html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] - +#![feature(dotdot_in_tuple_patterns)] #![feature(rustc_private)] #![feature(staged_api)] #![feature(rustc_diagnostic_macros)] #![feature(slice_patterns)] -#![feature(question_mark)] +#![cfg_attr(stage0, feature(question_mark))] #![feature(box_patterns)] #![feature(box_syntax)] diff --git a/src/librustc_const_math/lib.rs b/src/librustc_const_math/lib.rs index 741dd4107e..31fccb41ce 100644 --- a/src/librustc_const_math/lib.rs +++ b/src/librustc_const_math/lib.rs @@ -25,7 +25,7 @@ #![feature(rustc_private)] #![feature(staged_api)] -#![feature(question_mark)] +#![cfg_attr(stage0, feature(question_mark))] #[macro_use] extern crate log; #[macro_use] extern crate syntax; diff --git a/src/librustdoc/flock.rs b/src/librustc_data_structures/flock.rs similarity index 57% rename from src/librustdoc/flock.rs rename to src/librustc_data_structures/flock.rs index 41bcfdb7cb..510c9ceef0 100644 --- a/src/librustdoc/flock.rs +++ 
b/src/librustc_data_structures/flock.rs @@ -15,6 +15,7 @@ //! librustdoc, it is not production quality at all. #![allow(non_camel_case_types)] +use std::path::Path; pub use self::imp::Lock; @@ -41,6 +42,7 @@ mod imp { pub l_sysid: libc::c_int, } + pub const F_RDLCK: libc::c_short = 0; pub const F_WRLCK: libc::c_short = 1; pub const F_UNLCK: libc::c_short = 2; pub const F_SETLK: libc::c_int = 6; @@ -60,6 +62,7 @@ mod imp { pub l_sysid: libc::c_int, } + pub const F_RDLCK: libc::c_short = 1; pub const F_UNLCK: libc::c_short = 2; pub const F_WRLCK: libc::c_short = 3; pub const F_SETLK: libc::c_int = 12; @@ -84,12 +87,34 @@ mod imp { pub l_sysid: libc::c_int, } + pub const F_RDLCK: libc::c_short = 1; pub const F_UNLCK: libc::c_short = 2; pub const F_WRLCK: libc::c_short = 3; pub const F_SETLK: libc::c_int = 8; pub const F_SETLKW: libc::c_int = 9; } + #[cfg(target_os = "haiku")] + mod os { + use libc; + + pub struct flock { + pub l_type: libc::c_short, + pub l_whence: libc::c_short, + pub l_start: libc::off_t, + pub l_len: libc::off_t, + pub l_pid: libc::pid_t, + + // not actually here, but brings in line with freebsd + pub l_sysid: libc::c_int, + } + + pub const F_UNLCK: libc::c_short = 0x0200; + pub const F_WRLCK: libc::c_short = 0x0400; + pub const F_SETLK: libc::c_int = 0x0080; + pub const F_SETLKW: libc::c_int = 0x0100; + } + #[cfg(any(target_os = "macos", target_os = "ios"))] mod os { use libc; @@ -105,6 +130,7 @@ mod imp { pub l_sysid: libc::c_int, } + pub const F_RDLCK: libc::c_short = 1; pub const F_UNLCK: libc::c_short = 2; pub const F_WRLCK: libc::c_short = 3; pub const F_SETLK: libc::c_int = 8; @@ -124,43 +150,66 @@ mod imp { pub l_pid: libc::pid_t, } + pub const F_RDLCK: libc::c_short = 1; pub const F_WRLCK: libc::c_short = 2; pub const F_UNLCK: libc::c_short = 3; pub const F_SETLK: libc::c_int = 6; pub const F_SETLKW: libc::c_int = 7; } + #[derive(Debug)] pub struct Lock { fd: libc::c_int, } impl Lock { - pub fn new(p: &Path) -> Lock { + pub fn new(p: &Path, + wait: bool, + create: bool, + exclusive: bool) + -> io::Result { let os: &OsStr = p.as_ref(); let buf = CString::new(os.as_bytes()).unwrap(); + let open_flags = if create { + libc::O_RDWR | libc::O_CREAT + } else { + libc::O_RDWR + }; + let fd = unsafe { - libc::open(buf.as_ptr(), libc::O_RDWR | libc::O_CREAT, + libc::open(buf.as_ptr(), open_flags, libc::S_IRWXU as libc::c_int) }; - assert!(fd > 0, "failed to open lockfile: {}", - io::Error::last_os_error()); + + if fd < 0 { + return Err(io::Error::last_os_error()); + } + + let lock_type = if exclusive { + os::F_WRLCK + } else { + os::F_RDLCK + }; + let flock = os::flock { l_start: 0, l_len: 0, l_pid: 0, l_whence: libc::SEEK_SET as libc::c_short, - l_type: os::F_WRLCK, + l_type: lock_type, l_sysid: 0, }; + let cmd = if wait { os::F_SETLKW } else { os::F_SETLK }; let ret = unsafe { - libc::fcntl(fd, os::F_SETLKW, &flock) + libc::fcntl(fd, cmd, &flock) }; if ret == -1 { let err = io::Error::last_os_error(); unsafe { libc::close(fd); } - panic!("could not lock `{}`: {}", p.display(), err); + Err(err) + } else { + Ok(Lock { fd: fd }) } - Lock { fd: fd } } } @@ -191,18 +240,27 @@ mod imp { use std::os::windows::raw::HANDLE; use std::path::Path; use std::fs::{File, OpenOptions}; + use std::os::raw::{c_ulong, c_ulonglong, c_int}; + + type DWORD = c_ulong; + type BOOL = c_int; + type ULONG_PTR = c_ulonglong; - type DWORD = u32; type LPOVERLAPPED = *mut OVERLAPPED; - type BOOL = i32; const LOCKFILE_EXCLUSIVE_LOCK: DWORD = 0x00000002; + const LOCKFILE_FAIL_IMMEDIATELY: DWORD = 
0x00000001; + + const FILE_SHARE_DELETE: DWORD = 0x4; + const FILE_SHARE_READ: DWORD = 0x1; + const FILE_SHARE_WRITE: DWORD = 0x2; #[repr(C)] struct OVERLAPPED { - Internal: usize, - InternalHigh: usize, - Pointer: *mut u8, - hEvent: *mut u8, + Internal: ULONG_PTR, + InternalHigh: ULONG_PTR, + Offset: DWORD, + OffsetHigh: DWORD, + hEvent: HANDLE, } extern "system" { @@ -214,24 +272,88 @@ mod imp { lpOverlapped: LPOVERLAPPED) -> BOOL; } + #[derive(Debug)] pub struct Lock { _file: File, } impl Lock { - pub fn new(p: &Path) -> Lock { - let f = OpenOptions::new().read(true).write(true).create(true) - .open(p).unwrap(); + pub fn new(p: &Path, + wait: bool, + create: bool, + exclusive: bool) + -> io::Result { + assert!(p.parent().unwrap().exists(), + "Parent directory of lock-file must exist: {}", + p.display()); + + let share_mode = FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE; + + let mut open_options = OpenOptions::new(); + open_options.read(true) + .share_mode(share_mode); + + if create { + open_options.create(true) + .write(true); + } + + debug!("Attempting to open lock file `{}`", p.display()); + let file = match open_options.open(p) { + Ok(file) => { + debug!("Lock file opened successfully"); + file + } + Err(err) => { + debug!("Error opening lock file: {}", err); + return Err(err) + } + }; + let ret = unsafe { let mut overlapped: OVERLAPPED = mem::zeroed(); - LockFileEx(f.as_raw_handle(), LOCKFILE_EXCLUSIVE_LOCK, 0, 100, 0, + + let mut dwFlags = 0; + if !wait { + dwFlags |= LOCKFILE_FAIL_IMMEDIATELY; + } + + if exclusive { + dwFlags |= LOCKFILE_EXCLUSIVE_LOCK; + } + + debug!("Attempting to acquire lock on lock file `{}`", + p.display()); + LockFileEx(file.as_raw_handle(), + dwFlags, + 0, + 0xFFFF_FFFF, + 0xFFFF_FFFF, &mut overlapped) }; if ret == 0 { let err = io::Error::last_os_error(); - panic!("could not lock `{}`: {}", p.display(), err); + debug!("Failed acquiring file lock: {}", err); + Err(err) + } else { + debug!("Successfully acquired lock."); + Ok(Lock { _file: file }) } - Lock { _file: f } } } + + // Note that we don't need a Drop impl on the Windows: The file is unlocked + // automatically when it's closed. +} + +impl imp::Lock { + pub fn panicking_new(p: &Path, + wait: bool, + create: bool, + exclusive: bool) + -> Lock { + Lock::new(p, wait, create, exclusive).unwrap_or_else(|err| { + panic!("could not lock `{}`: {}", p.display(), err); + }) + } } diff --git a/src/librustc_data_structures/fnv.rs b/src/librustc_data_structures/fnv.rs index 0000c283a7..ae90c2fac8 100644 --- a/src/librustc_data_structures/fnv.rs +++ b/src/librustc_data_structures/fnv.rs @@ -35,6 +35,7 @@ pub fn FnvHashSet() -> FnvHashSet { pub struct FnvHasher(u64); impl Default for FnvHasher { + /// Creates a `FnvHasher`, with a 64-bit hex initial value. 
#[inline] fn default() -> FnvHasher { FnvHasher(0xcbf29ce484222325) @@ -57,3 +58,9 @@ impl Hasher for FnvHasher { self.0 } } + +pub fn hash(v: &T) -> u64 { + let mut state = FnvHasher::default(); + v.hash(&mut state); + state.finish() +} diff --git a/src/librustc_data_structures/lib.rs b/src/librustc_data_structures/lib.rs index 34c3961d5b..e7da18cef1 100644 --- a/src/librustc_data_structures/lib.rs +++ b/src/librustc_data_structures/lib.rs @@ -31,12 +31,15 @@ #![feature(unboxed_closures)] #![feature(fn_traits)] +#![cfg_attr(unix, feature(libc))] #![cfg_attr(test, feature(test))] extern crate core; #[macro_use] extern crate log; extern crate serialize as rustc_serialize; // used by deriving +#[cfg(unix)] +extern crate libc; pub mod bitvec; pub mod graph; @@ -51,6 +54,7 @@ pub mod fnv; pub mod tuple_slice; pub mod veccell; pub mod control_flow_graph; +pub mod flock; // See comments in src/librustc/lib.rs #[doc(hidden)] diff --git a/src/librustc_driver/Cargo.toml b/src/librustc_driver/Cargo.toml index 54c62d3665..772d83eb2c 100644 --- a/src/librustc_driver/Cargo.toml +++ b/src/librustc_driver/Cargo.toml @@ -33,4 +33,5 @@ rustc_metadata = { path = "../librustc_metadata" } serialize = { path = "../libserialize" } syntax = { path = "../libsyntax" } syntax_ext = { path = "../libsyntax_ext" } -syntax_pos = { path = "../libsyntax_pos" } \ No newline at end of file +syntax_pos = { path = "../libsyntax_pos" } +proc_macro = { path = "../libproc_macro" } diff --git a/src/librustc_driver/derive_registrar.rs b/src/librustc_driver/derive_registrar.rs new file mode 100644 index 0000000000..ea7621e16e --- /dev/null +++ b/src/librustc_driver/derive_registrar.rs @@ -0,0 +1,37 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
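
The seed in `FnvHasher::default` above is the standard 64-bit FNV offset basis, and the new free `hash` helper simply feeds a value through a fresh hasher. As a rough standalone sketch of the same FNV-1a scheme (illustrative only, not the `librustc_data_structures` type itself):

```rust
use std::hash::{Hash, Hasher};

// Minimal FNV-1a hasher: seed with the 64-bit offset basis (the same constant used
// by FnvHasher::default above), then xor each byte and multiply by the FNV prime.
struct Fnv(u64);

impl Hasher for Fnv {
    fn write(&mut self, bytes: &[u8]) {
        for byte in bytes {
            self.0 = (self.0 ^ *byte as u64).wrapping_mul(0x100000001b3);
        }
    }
    fn finish(&self) -> u64 {
        self.0
    }
}

// Convenience helper in the spirit of the `hash` function added above.
fn fnv_hash<T: Hash>(value: &T) -> u64 {
    let mut state = Fnv(0xcbf29ce484222325);
    value.hash(&mut state);
    state.finish()
}

fn main() {
    println!("{:x}", fnv_hash(&"hello"));
    println!("{:x}", fnv_hash(&42u32));
}
```

FNV is cheap on the short keys the compiler hashes constantly (node ids, def ids), which is presumably why it is preferred here over the DoS-resistant SipHash default.
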
+ +use rustc::dep_graph::DepNode; +use rustc::hir::intravisit::Visitor; +use rustc::hir::map::Map; +use rustc::hir; +use syntax::ast; +use syntax::attr; + +pub fn find(hir_map: &Map) -> Option { + let _task = hir_map.dep_graph.in_task(DepNode::PluginRegistrar); + let krate = hir_map.krate(); + + let mut finder = Finder { registrar: None }; + krate.visit_all_items(&mut finder); + finder.registrar +} + +struct Finder { + registrar: Option, +} + +impl<'v> Visitor<'v> for Finder { + fn visit_item(&mut self, item: &hir::Item) { + if attr::contains_name(&item.attrs, "rustc_derive_registrar") { + self.registrar = Some(item.id); + } + } +} diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index c610000478..9d5dce7ad0 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -26,10 +26,9 @@ use rustc::util::common::time; use rustc::util::nodemap::NodeSet; use rustc_back::sha2::{Sha256, Digest}; use rustc_borrowck as borrowck; -use rustc_incremental; +use rustc_incremental::{self, IncrementalHashesMap}; use rustc_resolve::{MakeGlobMap, Resolver}; -use rustc_metadata::macro_import; -use rustc_metadata::creader::read_local_crates; +use rustc_metadata::creader::CrateLoader; use rustc_metadata::cstore::CStore; use rustc_trans::back::{link, write}; use rustc_trans as trans; @@ -44,17 +43,21 @@ use super::Compilation; use serialize::json; use std::env; +use std::mem; use std::ffi::{OsString, OsStr}; use std::fs; use std::io::{self, Write}; use std::path::{Path, PathBuf}; use syntax::{ast, diagnostics, visit}; -use syntax::attr::{self, AttrMetaMethods}; +use syntax::attr; +use syntax::ext::base::ExtCtxt; use syntax::parse::{self, PResult, token}; use syntax::util::node_count::NodeCounter; use syntax; use syntax_ext; +use derive_registrar; + #[derive(Clone)] pub struct Resolutions { pub def_map: DefMap, @@ -88,7 +91,7 @@ pub fn compile_input(sess: &Session, // We need nested scopes here, because the intermediate results can keep // large chunks of memory alive and we want to free them as soon as // possible to keep the peak memory usage low - let (outputs, trans, crate_name) = { + let (outputs, trans) = { let krate = match phase_1_parse_input(sess, cfg, input) { Ok(krate) => krate, Err(mut parse_error) => { @@ -97,7 +100,7 @@ pub fn compile_input(sess: &Session, } }; - let krate = { + let (krate, registry) = { let mut compile_state = CompileState::state_after_parse(input, sess, outdir, @@ -109,14 +112,14 @@ pub fn compile_input(sess: &Session, compile_state, Ok(())); - compile_state.krate.unwrap() + (compile_state.krate.unwrap(), compile_state.registry) }; let outputs = build_output_filenames(input, outdir, output, &krate.attrs, sess); let crate_name = link::find_crate_name(Some(sess), &krate.attrs, input); let ExpansionResult { expanded_crate, defs, analysis, resolutions, mut hir_forest } = { phase_2_configure_and_expand( - sess, &cstore, krate, &crate_name, addl_plugins, control.make_glob_map, + sess, &cstore, krate, registry, &crate_name, addl_plugins, control.make_glob_map, |expanded_crate| { let mut state = CompileState::state_after_expand( input, sess, outdir, output, &cstore, expanded_crate, &crate_name, @@ -172,7 +175,7 @@ pub fn compile_input(sess: &Session, resolutions, &arenas, &crate_name, - |tcx, mir_map, analysis, result| { + |tcx, mir_map, analysis, incremental_hashes_map, result| { { // Eventually, we will want to track plugins. 
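
The new `derive_registrar::find` walks every item in the crate and remembers the one tagged `#[rustc_derive_registrar]`. A toy sketch of that scan-for-a-marker-attribute pattern, using plain structs rather than rustc's HIR visitor (the types here are invented for illustration):

```rust
#[derive(Debug)]
struct Item {
    id: u32,
    attrs: Vec<String>,
}

// Return the id of the first item carrying the given marker attribute, if any.
fn find_marked_item(items: &[Item], marker: &str) -> Option<u32> {
    items.iter()
         .find(|item| item.attrs.iter().any(|attr| attr == marker))
         .map(|item| item.id)
}

fn main() {
    let items = vec![
        Item { id: 1, attrs: vec![] },
        Item { id: 2, attrs: vec!["rustc_derive_registrar".to_string()] },
    ];
    println!("{:?}", find_marked_item(&items, "rustc_derive_registrar")); // Some(2)
}
```
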
let _ignore = tcx.dep_graph.in_ignore(); @@ -202,7 +205,8 @@ pub fn compile_input(sess: &Session, } let trans = phase_4_translate_to_llvm(tcx, mir_map.unwrap(), - analysis); + analysis, + &incremental_hashes_map); if log_enabled!(::log::INFO) { println!("Post-trans"); @@ -212,11 +216,11 @@ pub fn compile_input(sess: &Session, // Discard interned strings as they are no longer required. token::clear_ident_interner(); - Ok((outputs, trans, crate_name.clone())) + Ok((outputs, trans)) })?? }; - let phase5_result = phase_5_run_llvm_passes(sess, &crate_name, &trans, &outputs); + let phase5_result = phase_5_run_llvm_passes(sess, &trans, &outputs); controller_entry_point!(after_llvm, sess, @@ -228,6 +232,14 @@ pub fn compile_input(sess: &Session, phase_6_link_output(sess, &trans, &outputs); + // Now that we won't touch anything in the incremental compilation directory + // any more, we can finalize it (which involves renaming it) + rustc_incremental::finalize_session_directory(sess, trans.link.crate_hash); + + if sess.opts.debugging_opts.perf_stats { + sess.print_perf_stats(); + } + controller_entry_point!(compilation_done, sess, CompileState::state_when_compilation_done(input, sess, outdir, output), @@ -243,7 +255,8 @@ fn keep_hygiene_data(sess: &Session) -> bool { fn keep_ast(sess: &Session) -> bool { sess.opts.debugging_opts.keep_ast || sess.opts.debugging_opts.save_analysis || - sess.opts.debugging_opts.save_analysis_csv + sess.opts.debugging_opts.save_analysis_csv || + sess.opts.debugging_opts.save_analysis_api } /// The name used for source code that doesn't originate in a file @@ -324,6 +337,7 @@ pub struct CompileState<'a, 'b, 'ast: 'a, 'tcx: 'b> where 'ast: 'tcx { pub input: &'a Input, pub session: &'ast Session, pub krate: Option, + pub registry: Option>, pub cstore: Option<&'a CStore>, pub crate_name: Option<&'a str>, pub output_filenames: Option<&'a OutputFilenames>, @@ -352,6 +366,7 @@ impl<'a, 'b, 'ast, 'tcx> CompileState<'a, 'b, 'ast, 'tcx> { out_file: None, arenas: None, krate: None, + registry: None, cstore: None, crate_name: None, output_filenames: None, @@ -374,6 +389,8 @@ impl<'a, 'b, 'ast, 'tcx> CompileState<'a, 'b, 'ast, 'tcx> { cstore: &'a CStore) -> CompileState<'a, 'b, 'ast, 'tcx> { CompileState { + // Initialize the registry before moving `krate` + registry: Some(Registry::new(&session, krate.span)), krate: Some(krate), cstore: Some(cstore), out_file: out_file.as_ref().map(|s| &**s), @@ -539,7 +556,8 @@ pub struct ExpansionResult<'a> { /// Returns `None` if we're aborting after handling -W help. pub fn phase_2_configure_and_expand<'a, F>(sess: &Session, cstore: &CStore, - mut krate: ast::Crate, + krate: ast::Crate, + registry: Option, crate_name: &'a str, addl_plugins: Option>, make_glob_map: MakeGlobMap, @@ -549,21 +567,9 @@ pub fn phase_2_configure_and_expand<'a, F>(sess: &Session, { let time_passes = sess.time_passes(); - // strip before anything else because crate metadata may use #[cfg_attr] - // and so macros can depend on configuration variables, such as - // - // #[macro_use] #[cfg(foo)] - // mod bar { macro_rules! baz!(() => {{}}) } - // - // baz! should not use this definition unless foo is enabled. - - krate = time(time_passes, "configuration", || { - let (krate, features) = - syntax::config::strip_unconfigured_items(krate, &sess.parse_sess, sess.opts.test); - // these need to be set "early" so that expansion sees `quote` if enabled. 
- *sess.features.borrow_mut() = features; - krate - }); + let (mut krate, features) = syntax::config::features(krate, &sess.parse_sess, sess.opts.test); + // these need to be set "early" so that expansion sees `quote` if enabled. + *sess.features.borrow_mut() = features; *sess.crate_types.borrow_mut() = collect_crate_types(sess, &krate.attrs); *sess.crate_disambiguator.borrow_mut() = @@ -587,7 +593,7 @@ pub fn phase_2_configure_and_expand<'a, F>(sess: &Session, addl_plugins.take().unwrap()) }); - let mut registry = Registry::new(sess, &krate); + let mut registry = registry.unwrap_or(Registry::new(sess, krate.span)); time(time_passes, "plugin registration", || { if sess.features.borrow().rustc_diagnostic_macros { @@ -633,6 +639,12 @@ pub fn phase_2_configure_and_expand<'a, F>(sess: &Session, } sess.track_errors(|| sess.lint_store.borrow_mut().process_command_line(sess))?; + let mut crate_loader = CrateLoader::new(sess, &cstore, &krate, crate_name); + let resolver_arenas = Resolver::arenas(); + let mut resolver = + Resolver::new(sess, &krate, make_glob_map, &mut crate_loader, &resolver_arenas); + syntax_ext::register_builtins(&mut resolver, sess.features.borrow().quote); + krate = time(time_passes, "expansion", || { // Windows dlls do not have rpaths, so they don't know how to find their // dependencies. It's up to us to tell the system where to find all the @@ -661,40 +673,42 @@ pub fn phase_2_configure_and_expand<'a, F>(sess: &Session, } let features = sess.features.borrow(); let cfg = syntax::ext::expand::ExpansionConfig { - crate_name: crate_name.to_string(), features: Some(&features), recursion_limit: sess.recursion_limit.get(), trace_mac: sess.opts.debugging_opts.trace_macros, should_test: sess.opts.test, + ..syntax::ext::expand::ExpansionConfig::default(crate_name.to_string()) }; - let mut loader = macro_import::MacroLoader::new(sess, - &cstore, - crate_name, - krate.config.clone()); - let mut ecx = syntax::ext::base::ExtCtxt::new(&sess.parse_sess, - krate.config.clone(), - cfg, - &mut loader); - syntax_ext::register_builtins(&mut ecx.syntax_env); + let mut ecx = ExtCtxt::new(&sess.parse_sess, krate.config.clone(), cfg, &mut resolver); let ret = syntax::ext::expand::expand_crate(&mut ecx, syntax_exts, krate); if cfg!(windows) { env::set_var("PATH", &old_path); } - *sess.available_macros.borrow_mut() = ecx.syntax_env.names; ret }); + krate.exported_macros = mem::replace(&mut resolver.exported_macros, Vec::new()); + krate = time(time_passes, "maybe building test harness", || { syntax::test::modify_for_testing(&sess.parse_sess, + &mut resolver, sess.opts.test, krate, sess.diagnostic()) }); - let resolver_arenas = Resolver::arenas(); - let mut resolver = Resolver::new(sess, make_glob_map, &resolver_arenas); - - let krate = time(sess.time_passes(), "assigning node ids", || resolver.assign_node_ids(krate)); + krate = time(time_passes, "maybe creating a macro crate", || { + let crate_types = sess.crate_types.borrow(); + let is_rustc_macro_crate = crate_types.contains(&config::CrateTypeRustcMacro); + let num_crate_types = crate_types.len(); + syntax_ext::rustc_macro_registrar::modify(&sess.parse_sess, + &mut resolver, + krate, + is_rustc_macro_crate, + num_crate_types, + sess.diagnostic(), + &sess.features.borrow()) + }); if sess.opts.debugging_opts.input_stats { println!("Post-expansion node count: {}", count_nodes(&krate)); @@ -722,11 +736,6 @@ pub fn phase_2_configure_and_expand<'a, F>(sess: &Session, // Collect defintions for def ids. 
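
The rewritten `ExpansionConfig` construction above relies on Rust's functional record update syntax: the listed fields are set explicitly and `..base` fills in the rest from a base value. A minimal self-contained sketch of that syntax, with an illustrative `Config` type rather than the compiler's:

```rust
#[derive(Debug)]
struct Config {
    crate_name: String,
    recursion_limit: usize,
    trace_macros: bool,
}

impl Config {
    fn default_for(crate_name: String) -> Config {
        Config {
            crate_name: crate_name,
            recursion_limit: 64,
            trace_macros: false,
        }
    }
}

fn main() {
    // Override two fields; `..` takes the remaining fields from the base value.
    let cfg = Config {
        recursion_limit: 128,
        trace_macros: true,
        ..Config::default_for("example".to_string())
    };
    println!("{:?}", cfg);
}
```
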
time(sess.time_passes(), "collecting defs", || resolver.definitions.collect(&krate)); - time(sess.time_passes(), "external crate/lib resolution", || { - let defs = &resolver.definitions; - read_local_crates(sess, &cstore, defs, &krate, crate_name, &sess.dep_graph) - }); - time(sess.time_passes(), "early lint checks", || lint::check_ast_crate(sess, &krate)); @@ -797,14 +806,15 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, where F: for<'a> FnOnce(TyCtxt<'a, 'tcx, 'tcx>, Option>, ty::CrateAnalysis, + IncrementalHashesMap, CompileResult) -> R { macro_rules! try_with_f { - ($e: expr, ($t: expr, $m: expr, $a: expr)) => { + ($e: expr, ($t: expr, $m: expr, $a: expr, $h: expr)) => { match $e { Ok(x) => x, Err(x) => { - f($t, $m, $a, Err(x)); + f($t, $m, $a, $h, Err(x)); return Err(x); } } @@ -832,6 +842,7 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, sess.plugin_registrar_fn.set(time(time_passes, "looking for plugin registrar", || { plugin::build::find_plugin_registrar(sess.diagnostic(), &hir_map) })); + sess.derive_registrar_fn.set(derive_registrar::find(&hir_map)); let region_map = time(time_passes, "region resolution", @@ -860,12 +871,16 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, index, name, |tcx| { + let incremental_hashes_map = + time(time_passes, + "compute_incremental_hashes_map", + || rustc_incremental::compute_incremental_hashes_map(tcx)); time(time_passes, "load_dep_graph", - || rustc_incremental::load_dep_graph(tcx)); + || rustc_incremental::load_dep_graph(tcx, &incremental_hashes_map)); // passes are timed inside typeck - try_with_f!(typeck::check_crate(tcx), (tcx, None, analysis)); + try_with_f!(typeck::check_crate(tcx), (tcx, None, analysis, incremental_hashes_map)); time(time_passes, "const checking", @@ -935,7 +950,11 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, // lint warnings and so on -- kindck used to do this abort, but // kindck is gone now). -nmatsakis if sess.err_count() > 0 { - return Ok(f(tcx, Some(mir_map), analysis, Err(sess.err_count()))); + return Ok(f(tcx, + Some(mir_map), + analysis, + incremental_hashes_map, + Err(sess.err_count()))); } analysis.reachable = @@ -963,17 +982,22 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, // The above three passes generate errors w/o aborting if sess.err_count() > 0 { - return Ok(f(tcx, Some(mir_map), analysis, Err(sess.err_count()))); + return Ok(f(tcx, + Some(mir_map), + analysis, + incremental_hashes_map, + Err(sess.err_count()))); } - Ok(f(tcx, Some(mir_map), analysis, Ok(()))) + Ok(f(tcx, Some(mir_map), analysis, incremental_hashes_map, Ok(()))) }) } /// Run the translation phase to LLVM, after which the AST and analysis can pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mut mir_map: MirMap<'tcx>, - analysis: ty::CrateAnalysis) + analysis: ty::CrateAnalysis, + incremental_hashes_map: &IncrementalHashesMap) -> trans::CrateTranslation { let time_passes = tcx.sess.time_passes(); @@ -989,6 +1013,7 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, passes.push_pass(box mir::transform::no_landing_pads::NoLandingPads); passes.push_pass(box mir::transform::simplify_cfg::SimplifyCfg::new("no-landing-pads")); + // From here on out, regions are gone. 
passes.push_pass(box mir::transform::erase_regions::EraseRegions); passes.push_pass(box mir::transform::add_call_guards::AddCallGuards); @@ -996,7 +1021,10 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, passes.push_pass(box mir::transform::no_landing_pads::NoLandingPads); passes.push_pass(box mir::transform::simplify_cfg::SimplifyCfg::new("elaborate-drops")); + // No lifetime analysis based on borrowing can be done from here on out. + passes.push_pass(box mir::transform::instcombine::InstCombine::new()); passes.push_pass(box mir::transform::deaggregator::Deaggregator); + passes.push_pass(box mir::transform::copy_prop::CopyPropagation); passes.push_pass(box mir::transform::add_call_guards::AddCallGuards); passes.push_pass(box mir::transform::dump_mir::Marker("PreTrans")); @@ -1007,23 +1035,23 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let translation = time(time_passes, "translation", - move || trans::trans_crate(tcx, &mir_map, analysis)); + move || trans::trans_crate(tcx, &mir_map, analysis, &incremental_hashes_map)); time(time_passes, "assert dep graph", - move || rustc_incremental::assert_dep_graph(tcx)); + || rustc_incremental::assert_dep_graph(tcx)); time(time_passes, "serialize dep graph", - move || rustc_incremental::save_dep_graph(tcx)); - + || rustc_incremental::save_dep_graph(tcx, + &incremental_hashes_map, + translation.link.crate_hash)); translation } /// Run LLVM itself, producing a bitcode file, assembly file or object file /// as a side effect. pub fn phase_5_run_llvm_passes(sess: &Session, - crate_name: &str, trans: &trans::CrateTranslation, outputs: &OutputFilenames) -> CompileResult { if sess.opts.cg.no_integrated_as { @@ -1046,7 +1074,7 @@ pub fn phase_5_run_llvm_passes(sess: &Session, time(sess.time_passes(), "serialize work products", - move || rustc_incremental::save_work_products(sess, crate_name)); + move || rustc_incremental::save_work_products(sess)); if sess.err_count() > 0 { Err(sess.err_count()) @@ -1152,6 +1180,9 @@ pub fn collect_crate_types(session: &Session, attrs: &[ast::Attribute]) -> Vec { Some(config::CrateTypeStaticlib) } + Some(ref n) if *n == "rustc-macro" => { + Some(config::CrateTypeRustcMacro) + } Some(ref n) if *n == "bin" => Some(config::CrateTypeExecutable), Some(_) => { session.add_lint(lint::builtin::UNKNOWN_CRATE_TYPES, diff --git a/src/librustc_driver/lib.rs b/src/librustc_driver/lib.rs index f9a5331926..dfc4bcebd3 100644 --- a/src/librustc_driver/lib.rs +++ b/src/librustc_driver/lib.rs @@ -24,13 +24,14 @@ #![cfg_attr(not(stage0), deny(warnings))] #![feature(box_syntax)] +#![feature(dotdot_in_tuple_patterns)] #![feature(libc)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] #![feature(set_stdio)] #![feature(staged_api)] -#![feature(question_mark)] +#![cfg_attr(stage0, feature(question_mark))] extern crate arena; extern crate flate; @@ -72,7 +73,7 @@ use rustc_trans::back::write::{create_target_machine, RELOC_MODEL_ARGS, CODE_GEN use rustc::dep_graph::DepGraph; use rustc::session::{self, config, Session, build_session, CompileResult}; use rustc::session::config::{Input, PrintRequest, OutputType, ErrorOutputType}; -use rustc::session::config::{get_unstable_features_setting, nightly_options}; +use rustc::session::config::nightly_options; use rustc::lint::Lint; use rustc::lint; use rustc_metadata::loader; @@ -95,7 +96,6 @@ use std::thread; use rustc::session::early_error; use syntax::{ast, json}; -use syntax::attr::AttrMetaMethods; use 
syntax::codemap::{CodeMap, FileLoader, RealFileLoader}; use syntax::feature_gate::{GatedCfg, UnstableFeatures}; use syntax::parse::{self, PResult}; @@ -108,7 +108,7 @@ pub mod test; pub mod driver; pub mod pretty; pub mod target_features; - +mod derive_registrar; const BUG_REPORT_URL: &'static str = "https://github.com/rust-lang/rust/blob/master/CONTRIBUTING.\ md#bug-reports"; @@ -556,7 +556,8 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls { fn save_analysis(sess: &Session) -> bool { sess.opts.debugging_opts.save_analysis || - sess.opts.debugging_opts.save_analysis_csv + sess.opts.debugging_opts.save_analysis_csv || + sess.opts.debugging_opts.save_analysis_api } fn save_analysis_format(sess: &Session) -> save::Format { @@ -564,6 +565,8 @@ fn save_analysis_format(sess: &Session) -> save::Format { save::Format::Json } else if sess.opts.debugging_opts.save_analysis_csv { save::Format::Csv + } else if sess.opts.debugging_opts.save_analysis_api { + save::Format::JsonApi } else { unreachable!(); } @@ -646,26 +649,26 @@ impl RustcDefaultCalls { } } PrintRequest::Cfg => { - let allow_unstable_cfg = match get_unstable_features_setting() { - UnstableFeatures::Disallow => false, - _ => true, - }; + let allow_unstable_cfg = UnstableFeatures::from_environment() + .is_nightly_build(); for cfg in cfg { if !allow_unstable_cfg && GatedCfg::gate(&*cfg).is_some() { continue; } + if cfg.is_word() { println!("{}", cfg.name()); - } else if cfg.is_value_str() { - if let Some(s) = cfg.value_str() { - println!("{}=\"{}\"", cfg.name(), s); - } + } else if let Some(s) = cfg.value_str() { + println!("{}=\"{}\"", cfg.name(), s); } else if cfg.is_meta_item_list() { // Right now there are not and should not be any // MetaItemKind::List items in the configuration returned by // `build_configuration`. - panic!("MetaItemKind::List encountered in default cfg") + panic!("Found an unexpected list in cfg attribute '{}'!", cfg.name()) + } else { + // There also shouldn't be literals. + panic!("Found an unexpected literal in cfg attribute '{}'!", cfg.name()) } } } @@ -801,7 +804,7 @@ Available lint options: let (plugin_groups, builtin_groups): (Vec<_>, _) = lint_store.get_lint_groups() .iter() .cloned() - .partition(|&(_, _, p)| p); + .partition(|&(.., p)| p); let plugin_groups = sort_lint_groups(plugin_groups); let builtin_groups = sort_lint_groups(builtin_groups); @@ -861,7 +864,7 @@ Available lint options: for (name, to) in lints { let name = name.to_lowercase().replace("_", "-"); let desc = to.into_iter() - .map(|x| x.as_str().replace("_", "-")) + .map(|x| x.to_string().replace("_", "-")) .collect::>() .join(", "); println!(" {} {}", padded(&name[..]), desc); @@ -876,7 +879,7 @@ Available lint options: println!("Compiler plugins can provide additional lints and lint groups. To see a \ listing of these, re-run `rustc -W help` with a crate filename."); } - (false, _, _) => panic!("didn't load lint plugins but got them anyway!"), + (false, ..) 
=> panic!("didn't load lint plugins but got them anyway!"), (true, 0, 0) => println!("This crate does not load any lint plugins or lint groups."), (true, l, g) => { if l > 0 { @@ -1135,6 +1138,7 @@ pub fn diagnostics_registry() -> errors::registry::Registry { all_errors.extend_from_slice(&rustc_privacy::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_trans::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_const_eval::DIAGNOSTICS); + all_errors.extend_from_slice(&rustc_metadata::DIAGNOSTICS); Registry::new(&all_errors) } diff --git a/src/librustc_driver/pretty.rs b/src/librustc_driver/pretty.rs index e3e06963ad..215287f843 100644 --- a/src/librustc_driver/pretty.rs +++ b/src/librustc_driver/pretty.rs @@ -234,7 +234,7 @@ impl PpSourceMode { resolutions.clone(), arenas, id, - |tcx, _, _, _| { + |tcx, _, _, _, _| { let annotation = TypedAnnotation { tcx: tcx, }; @@ -539,6 +539,7 @@ impl FromStr for UserIdentifiedItem { type Err = (); fn from_str(s: &str) -> Result { Ok(s.parse() + .map(ast::NodeId::new) .map(ItemViaNode) .unwrap_or_else(|_| ItemViaPath(s.split("::").map(|s| s.to_string()).collect()))) } @@ -951,7 +952,7 @@ fn print_with_analysis<'tcx, 'a: 'tcx>(sess: &'a Session, resolutions.clone(), arenas, crate_name, - |tcx, mir_map, _, _| { + |tcx, mir_map, _, _, _| { match ppm { PpmMir | PpmMirCFG => { if let Some(mir_map) = mir_map { diff --git a/src/librustc_driver/test.rs b/src/librustc_driver/test.rs index 7711091685..f6772b8771 100644 --- a/src/librustc_driver/test.rs +++ b/src/librustc_driver/test.rs @@ -20,14 +20,14 @@ use rustc::middle::region::{self, CodeExtent}; use rustc::middle::region::CodeExtentData; use rustc::middle::resolve_lifetime; use rustc::middle::stability; -use rustc::ty::subst; -use rustc::ty::subst::Subst; +use rustc::ty::subst::{Kind, Subst, Substs}; use rustc::traits::Reveal; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc::infer::{self, InferOk, InferResult, TypeOrigin}; use rustc_metadata::cstore::CStore; use rustc::hir::map as hir_map; use rustc::session::{self, config}; +use std::iter; use std::rc::Rc; use syntax::ast; use syntax::abi::Abi; @@ -115,7 +115,7 @@ fn test_env(source_string: &str, let krate = driver::phase_1_parse_input(&sess, krate_config, &input).unwrap(); let driver::ExpansionResult { defs, resolutions, mut hir_forest, .. } = { driver::phase_2_configure_and_expand( - &sess, &cstore, krate, "test", None, MakeGlobMap::No, |_| Ok(()), + &sess, &cstore, krate, None, "test", None, MakeGlobMap::No, |_| Ok(()), ).expect("phase 2 aborted") }; let _ignore = dep_graph.in_ignore(); @@ -166,16 +166,17 @@ impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { pub fn create_simple_region_hierarchy(&self) { // creates a region hierarchy where 1 is root, 10 and 11 are // children of 1, etc + + let node = ast::NodeId::from_u32; let dscope = self.infcx .tcx .region_maps - .intern_code_extent(CodeExtentData::DestructionScope(1), + .intern_code_extent(CodeExtentData::DestructionScope(node(1)), region::ROOT_CODE_EXTENT); self.create_region_hierarchy(&RH { - id: 1, - sub: &[RH { id: 10, sub: &[] }, RH { id: 11, sub: &[] }], - }, - dscope); + id: node(1), + sub: &[RH { id: node(10), sub: &[] }, RH { id: node(11), sub: &[] }], + }, dscope); } #[allow(dead_code)] // this seems like it could be useful, even if we don't use it now @@ -220,6 +221,7 @@ impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { hir::ItemEnum(..) | hir::ItemStruct(..) | + hir::ItemUnion(..) | hir::ItemTrait(..) | hir::ItemImpl(..) | hir::ItemDefaultImpl(..) 
=> { @@ -276,35 +278,34 @@ impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { self.infcx.tcx.mk_tup(vec![ty1, ty2]) } - pub fn t_param(&self, space: subst::ParamSpace, index: u32) -> Ty<'tcx> { + pub fn t_param(&self, index: u32) -> Ty<'tcx> { let name = format!("T{}", index); - self.infcx.tcx.mk_param(space, index, token::intern(&name[..])) + self.infcx.tcx.mk_param(index, token::intern(&name[..])) } pub fn re_early_bound(&self, - space: subst::ParamSpace, index: u32, name: &'static str) - -> ty::Region { + -> &'tcx ty::Region { let name = token::intern(name); - ty::ReEarlyBound(ty::EarlyBoundRegion { - space: space, + self.infcx.tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion { index: index, name: name, - }) + })) } - pub fn re_late_bound_with_debruijn(&self, id: u32, debruijn: ty::DebruijnIndex) -> ty::Region { - ty::ReLateBound(debruijn, ty::BrAnon(id)) + pub fn re_late_bound_with_debruijn(&self, id: u32, debruijn: ty::DebruijnIndex) + -> &'tcx ty::Region { + self.infcx.tcx.mk_region(ty::ReLateBound(debruijn, ty::BrAnon(id))) } - pub fn t_rptr(&self, r: ty::Region) -> Ty<'tcx> { - self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize) + pub fn t_rptr(&self, r: &'tcx ty::Region) -> Ty<'tcx> { + self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize) } pub fn t_rptr_late_bound(&self, id: u32) -> Ty<'tcx> { let r = self.re_late_bound_with_debruijn(id, ty::DebruijnIndex::new(1)); - self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize) + self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize) } pub fn t_rptr_late_bound_with_debruijn(&self, @@ -312,24 +313,24 @@ impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { debruijn: ty::DebruijnIndex) -> Ty<'tcx> { let r = self.re_late_bound_with_debruijn(id, debruijn); - self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize) + self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize) } - pub fn t_rptr_scope(&self, id: ast::NodeId) -> Ty<'tcx> { - let r = ty::ReScope(self.tcx().region_maps.node_extent(id)); + pub fn t_rptr_scope(&self, id: u32) -> Ty<'tcx> { + let r = ty::ReScope(self.tcx().region_maps.node_extent(ast::NodeId::from_u32(id))); self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize) } - pub fn re_free(&self, nid: ast::NodeId, id: u32) -> ty::Region { - ty::ReFree(ty::FreeRegion { + pub fn re_free(&self, nid: ast::NodeId, id: u32) -> &'tcx ty::Region { + self.infcx.tcx.mk_region(ty::ReFree(ty::FreeRegion { scope: self.tcx().region_maps.item_extent(nid), bound_region: ty::BrAnon(id), - }) + })) } - pub fn t_rptr_free(&self, nid: ast::NodeId, id: u32) -> Ty<'tcx> { - let r = self.re_free(nid, id); - self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize) + pub fn t_rptr_free(&self, nid: u32, id: u32) -> Ty<'tcx> { + let r = self.re_free(ast::NodeId::from_u32(nid), id); + self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize) } pub fn t_rptr_static(&self) -> Ty<'tcx> { @@ -674,12 +675,12 @@ fn subst_ty_renumber_bound() { // t_source = fn(A) let t_source = { - let t_param = env.t_param(subst::TypeSpace, 0); + let t_param = env.t_param(0); env.t_fn(&[t_param], env.t_nil()) }; - let substs = subst::Substs::new_type(vec![t_rptr_bound1], vec![]); - let t_substituted = t_source.subst(env.infcx.tcx, &substs); + let substs = Substs::new(env.infcx.tcx, iter::once(Kind::from(t_rptr_bound1))); + let t_substituted = t_source.subst(env.infcx.tcx, substs); // t_expected = fn(&'a isize) let t_expected = { @@ -709,12 +710,12 @@ fn subst_ty_renumber_some_bounds() { 
// t_source = (A, fn(A)) let t_source = { - let t_param = env.t_param(subst::TypeSpace, 0); + let t_param = env.t_param(0); env.t_pair(t_param, env.t_fn(&[t_param], env.t_nil())) }; - let substs = subst::Substs::new_type(vec![t_rptr_bound1], vec![]); - let t_substituted = t_source.subst(env.infcx.tcx, &substs); + let substs = Substs::new(env.infcx.tcx, iter::once(Kind::from(t_rptr_bound1))); + let t_substituted = t_source.subst(env.infcx.tcx, substs); // t_expected = (&'a isize, fn(&'a isize)) // @@ -755,7 +756,7 @@ fn escaping() { assert!(t_rptr_bound2.has_escaping_regions()); // t_fn = fn(A) - let t_param = env.t_param(subst::TypeSpace, 0); + let t_param = env.t_param(0); assert!(!t_param.has_escaping_regions()); let t_fn = env.t_fn(&[t_param], env.t_nil()); assert!(!t_fn.has_escaping_regions()); @@ -771,12 +772,12 @@ fn subst_region_renumber_region() { // type t_source<'a> = fn(&'a isize) let t_source = { - let re_early = env.re_early_bound(subst::TypeSpace, 0, "'a"); + let re_early = env.re_early_bound(0, "'a"); env.t_fn(&[env.t_rptr(re_early)], env.t_nil()) }; - let substs = subst::Substs::new_type(vec![], vec![re_bound1]); - let t_substituted = t_source.subst(env.infcx.tcx, &substs); + let substs = Substs::new(env.infcx.tcx, iter::once(Kind::from(re_bound1))); + let t_substituted = t_source.subst(env.infcx.tcx, substs); // t_expected = fn(&'a isize) // diff --git a/src/librustc_errors/emitter.rs b/src/librustc_errors/emitter.rs index 981729ddb8..6456b72dfb 100644 --- a/src/librustc_errors/emitter.rs +++ b/src/librustc_errors/emitter.rs @@ -12,7 +12,7 @@ use self::Destination::*; use syntax_pos::{COMMAND_LINE_SP, DUMMY_SP, FileMap, Span, MultiSpan, CharPos}; -use {Level, CodeSuggestion, DiagnosticBuilder, CodeMapper}; +use {Level, CodeSuggestion, DiagnosticBuilder, SubDiagnostic, CodeMapper}; use RenderSpan::*; use snippet::{StyledString, Style, Annotation, Line}; use styled_buffer::StyledBuffer; @@ -30,7 +30,10 @@ pub trait Emitter { impl Emitter for EmitterWriter { fn emit(&mut self, db: &DiagnosticBuilder) { - self.emit_messages_default(db); + let mut primary_span = db.span.clone(); + let mut children = db.children.clone(); + self.fix_multispans_in_std_macros(&mut primary_span, &mut children); + self.emit_messages_default(&db.level, &db.message, &db.code, &primary_span, &children); } } @@ -381,19 +384,103 @@ impl EmitterWriter { max } - fn get_max_line_num(&mut self, db: &DiagnosticBuilder) -> usize { + fn get_max_line_num(&mut self, span: &MultiSpan, children: &Vec) -> usize { let mut max = 0; - let primary = self.get_multispan_max_line_num(&db.span); + let primary = self.get_multispan_max_line_num(span); max = if primary > max { primary } else { max }; - for sub in &db.children { + for sub in children { let sub_result = self.get_multispan_max_line_num(&sub.span); max = if sub_result > max { primary } else { max }; } max } + // This "fixes" MultiSpans that contain Spans that are pointing to locations inside of + // <*macros>. Since these locations are often difficult to read, we move these Spans from + // <*macros> to their corresponding use site. 
+ fn fix_multispan_in_std_macros(&mut self, span: &mut MultiSpan) -> bool { + let mut spans_updated = false; + + if let Some(ref cm) = self.cm { + let mut before_after: Vec<(Span, Span)> = vec![]; + let mut new_labels: Vec<(Span, String)> = vec![]; + + // First, find all the spans in <*macros> and point instead at their use site + for sp in span.primary_spans() { + if (*sp == COMMAND_LINE_SP) || (*sp == DUMMY_SP) { + continue; + } + if cm.span_to_filename(sp.clone()).contains("macros>") { + let v = cm.macro_backtrace(sp.clone()); + if let Some(use_site) = v.last() { + before_after.push((sp.clone(), use_site.call_site.clone())); + } + } + for trace in cm.macro_backtrace(sp.clone()).iter().rev() { + // Only show macro locations that are local + // and display them like a span_note + if let Some(def_site) = trace.def_site_span { + if (def_site == COMMAND_LINE_SP) || (def_site == DUMMY_SP) { + continue; + } + // Check to make sure we're not in any <*macros> + if !cm.span_to_filename(def_site).contains("macros>") && + !trace.macro_decl_name.starts_with("#[") + { + new_labels.push((trace.call_site, + "in this macro invocation".to_string())); + break; + } + } + } + } + for (label_span, label_text) in new_labels { + span.push_span_label(label_span, label_text); + } + for sp_label in span.span_labels() { + if (sp_label.span == COMMAND_LINE_SP) || (sp_label.span == DUMMY_SP) { + continue; + } + if cm.span_to_filename(sp_label.span.clone()).contains("macros>") { + let v = cm.macro_backtrace(sp_label.span.clone()); + if let Some(use_site) = v.last() { + before_after.push((sp_label.span.clone(), use_site.call_site.clone())); + } + } + } + // After we have them, make sure we replace these 'bad' def sites with their use sites + for (before, after) in before_after { + span.replace(before, after); + spans_updated = true; + } + } + + spans_updated + } + + // This does a small "fix" for multispans by looking to see if it can find any that + // point directly at <*macros>. Since these are often difficult to read, this + // will change the span to point at the use site. 
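
The span-fixing logic above rewrites spans that point into a synthetic `<*macros>` buffer so that they point at the macro use site instead, and reports whether anything changed so a note can be appended. A toy model of that remapping with plain data (no `CodeMap`; the types and the lookup function are invented for illustration):

```rust
#[derive(Clone, Debug)]
struct Span {
    file: String,
    line: usize,
}

// Rewrite spans that point into a synthetic macro buffer so that they point at the
// recorded use site instead. Returns true if anything changed, so the caller knows
// to append a "this error originates in a macro" note.
fn fix_macro_spans<F>(spans: &mut Vec<Span>, call_site: &F) -> bool
    where F: Fn(&Span) -> Option<Span>
{
    let mut updated = false;
    for sp in spans.iter_mut() {
        if sp.file.contains("macros>") {
            if let Some(use_site) = call_site(sp) {
                *sp = use_site;
                updated = true;
            }
        }
    }
    updated
}

// Stand-in for walking the macro backtrace in a real CodeMap.
fn lookup_call_site(_sp: &Span) -> Option<Span> {
    Some(Span { file: "src/main.rs".to_string(), line: 10 })
}

fn main() {
    let mut spans = vec![Span { file: "<std macros>".to_string(), line: 3 }];
    if fix_macro_spans(&mut spans, &lookup_call_site) {
        println!("note: this error originates in a macro outside of the current crate");
    }
    println!("{:?}", spans);
}
```
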
+ fn fix_multispans_in_std_macros(&mut self, + span: &mut MultiSpan, + children: &mut Vec) { + let mut spans_updated = self.fix_multispan_in_std_macros(span); + for child in children.iter_mut() { + spans_updated |= self.fix_multispan_in_std_macros(&mut child.span); + } + if spans_updated { + children.push(SubDiagnostic { + level: Level::Note, + message:"this error originates in a macro outside of the current \ + crate".to_string(), + span: MultiSpan::new(), + render_span: None + }); + } + } + fn emit_message_default(&mut self, msp: &MultiSpan, msg: &str, @@ -528,10 +615,6 @@ impl EmitterWriter { } } - if let Some(ref primary_span) = msp.primary_span().as_ref() { - self.render_macro_backtrace_old_school(primary_span, &mut buffer)?; - } - // final step: take our styled buffer, render it, then output it emit_to_destination(&buffer.render(), level, &mut self.dst)?; @@ -578,26 +661,31 @@ impl EmitterWriter { } Ok(()) } - fn emit_messages_default(&mut self, db: &DiagnosticBuilder) { - let max_line_num = self.get_max_line_num(db); + fn emit_messages_default(&mut self, + level: &Level, + message: &String, + code: &Option, + span: &MultiSpan, + children: &Vec) { + let max_line_num = self.get_max_line_num(span, children); let max_line_num_len = max_line_num.to_string().len(); - match self.emit_message_default(&db.span, - &db.message, - &db.code, - &db.level, + match self.emit_message_default(span, + message, + code, + level, max_line_num_len, false) { Ok(()) => { - if !db.children.is_empty() { + if !children.is_empty() { let mut buffer = StyledBuffer::new(); draw_col_separator_no_space(&mut buffer, 0, max_line_num_len + 1); - match emit_to_destination(&buffer.render(), &db.level, &mut self.dst) { + match emit_to_destination(&buffer.render(), level, &mut self.dst) { Ok(()) => (), Err(e) => panic!("failed to emit error: {}", e) } } - for child in &db.children { + for child in children { match child.render_span { Some(FullSpan(ref msp)) => { match self.emit_message_default(msp, @@ -637,31 +725,11 @@ impl EmitterWriter { } match write!(&mut self.dst, "\n") { Err(e) => panic!("failed to emit error: {}", e), - _ => () - } - } - fn render_macro_backtrace_old_school(&mut self, - sp: &Span, - buffer: &mut StyledBuffer) -> io::Result<()> { - if let Some(ref cm) = self.cm { - for trace in cm.macro_backtrace(sp.clone()) { - let line_offset = buffer.num_lines(); - - let mut diag_string = - format!("in this expansion of {}", trace.macro_decl_name); - if let Some(def_site_span) = trace.def_site_span { - diag_string.push_str( - &format!(" (defined in {})", - cm.span_to_filename(def_site_span))); - } - let snippet = cm.span_to_string(trace.call_site); - buffer.append(line_offset, &format!("{} ", snippet), Style::NoStyle); - buffer.append(line_offset, "note", Style::Level(Level::Note)); - buffer.append(line_offset, ": ", Style::NoStyle); - buffer.append(line_offset, &diag_string, Style::OldSchoolNoteText); + _ => match self.dst.flush() { + Err(e) => panic!("failed to emit error: {}", e), + _ => () } } - Ok(()) } } @@ -685,6 +753,21 @@ fn overlaps(a1: &Annotation, a2: &Annotation) -> bool { fn emit_to_destination(rendered_buffer: &Vec>, lvl: &Level, dst: &mut Destination) -> io::Result<()> { + use lock; + + // In order to prevent error message interleaving, where multiple error lines get intermixed + // when multiple compiler processes error simultaneously, we emit errors with additional + // steps. + // + // On Unix systems, we write into a buffered terminal rather than directly to a terminal. 
When + // the .flush() is called we take the buffer created from the buffered writes and write it at + // one shot. Because the Unix systems use ANSI for the colors, which is a text-based styling + // scheme, this buffered approach works and maintains the styling. + // + // On Windows, styling happens through calls to a terminal API. This prevents us from using the + // same buffering approach. Instead, we use a global Windows mutex, which we acquire long + // enough to output the full error message, then we release. + let _buffer_lock = lock::acquire_global_lock("rustc_errors"); for line in rendered_buffer { for part in line { dst.apply_style(lvl.clone(), part.style)?; @@ -693,6 +776,7 @@ fn emit_to_destination(rendered_buffer: &Vec>, } write!(dst, "\n")?; } + dst.flush()?; Ok(()) } @@ -719,14 +803,74 @@ fn stderr_isatty() -> bool { } } +pub type BufferedStderr = term::Terminal + Send; + pub enum Destination { Terminal(Box), + BufferedTerminal(Box), Raw(Box), } +/// Buffered writer gives us a way on Unix to buffer up an entire error message before we output +/// it. This helps to prevent interleaving of multiple error messages when multiple compiler +/// processes error simultaneously +pub struct BufferedWriter { + buffer: Vec, +} + +impl BufferedWriter { + // note: we use _new because the conditional compilation at its use site may make this + // this function unused on some platforms + fn _new() -> BufferedWriter { + BufferedWriter { + buffer: vec![] + } + } +} + +impl Write for BufferedWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + for b in buf { + self.buffer.push(*b); + } + Ok(buf.len()) + } + fn flush(&mut self) -> io::Result<()> { + let mut stderr = io::stderr(); + let result = (|| { + stderr.write_all(&self.buffer)?; + stderr.flush() + })(); + self.buffer.clear(); + result + } +} + impl Destination { + #[cfg(not(windows))] + /// When not on Windows, prefer the buffered terminal so that we can buffer an entire error + /// to be emitted at one time. + fn from_stderr() -> Destination { + let stderr: Option> = + term::TerminfoTerminal::new(BufferedWriter::_new()) + .map(|t| Box::new(t) as Box); + + match stderr { + Some(t) => BufferedTerminal(t), + None => Raw(Box::new(io::stderr())), + } + } + + #[cfg(windows)] + /// Return a normal, unbuffered terminal when on Windows. 
fn from_stderr() -> Destination { - match term::stderr() { + let stderr: Option> = + term::TerminfoTerminal::new(io::stderr()) + .map(|t| Box::new(t) as Box) + .or_else(|| term::WinConsole::new(io::stderr()).ok() + .map(|t| Box::new(t) as Box)); + + match stderr { Some(t) => Terminal(t), None => Raw(Box::new(io::stderr())), } @@ -739,34 +883,45 @@ impl Destination { match style { Style::FileNameStyle | Style::LineAndColumn => {} Style::LineNumber => { - try!(self.start_attr(term::Attr::Bold)); - try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_BLUE))); + self.start_attr(term::Attr::Bold)?; + if cfg!(windows) { + self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_CYAN))?; + } else { + self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_BLUE))?; + } } Style::ErrorCode => { - try!(self.start_attr(term::Attr::Bold)); - try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_MAGENTA))); + self.start_attr(term::Attr::Bold)?; + self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_MAGENTA))?; } Style::Quotation => {} Style::OldSchoolNote => { - try!(self.start_attr(term::Attr::Bold)); - try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_GREEN))); + self.start_attr(term::Attr::Bold)?; + self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_GREEN))?; } Style::OldSchoolNoteText | Style::HeaderMsg => { - try!(self.start_attr(term::Attr::Bold)); + self.start_attr(term::Attr::Bold)?; + if cfg!(windows) { + self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_WHITE))?; + } } Style::UnderlinePrimary | Style::LabelPrimary => { - try!(self.start_attr(term::Attr::Bold)); - try!(self.start_attr(term::Attr::ForegroundColor(lvl.color()))); + self.start_attr(term::Attr::Bold)?; + self.start_attr(term::Attr::ForegroundColor(lvl.color()))?; } Style::UnderlineSecondary | Style::LabelSecondary => { - try!(self.start_attr(term::Attr::Bold)); - try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_BLUE))); + self.start_attr(term::Attr::Bold)?; + if cfg!(windows) { + self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_CYAN))?; + } else { + self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_BLUE))?; + } } Style::NoStyle => {} Style::Level(l) => { - try!(self.start_attr(term::Attr::Bold)); - try!(self.start_attr(term::Attr::ForegroundColor(l.color()))); + self.start_attr(term::Attr::Bold)?; + self.start_attr(term::Attr::ForegroundColor(l.color()))?; } } Ok(()) @@ -775,6 +930,7 @@ impl Destination { fn start_attr(&mut self, attr: term::Attr) -> io::Result<()> { match *self { Terminal(ref mut t) => { t.attr(attr)?; } + BufferedTerminal(ref mut t) => { t.attr(attr)?; } Raw(_) => { } } Ok(()) @@ -783,6 +939,7 @@ impl Destination { fn reset_attrs(&mut self) -> io::Result<()> { match *self { Terminal(ref mut t) => { t.reset()?; } + BufferedTerminal(ref mut t) => { t.reset()?; } Raw(_) => { } } Ok(()) @@ -793,13 +950,15 @@ impl Write for Destination { fn write(&mut self, bytes: &[u8]) -> io::Result { match *self { Terminal(ref mut t) => t.write(bytes), + BufferedTerminal(ref mut t) => t.write(bytes), Raw(ref mut w) => w.write(bytes), } } fn flush(&mut self) -> io::Result<()> { match *self { Terminal(ref mut t) => t.flush(), + BufferedTerminal(ref mut t) => t.flush(), Raw(ref mut w) => w.flush(), } } -} \ No newline at end of file +} diff --git a/src/librustc_errors/lib.rs b/src/librustc_errors/lib.rs index 172e27d56d..af8ac81b4f 100644 --- a/src/librustc_errors/lib.rs +++ 
b/src/librustc_errors/lib.rs @@ -21,7 +21,7 @@ #![allow(unused_attributes)] #![feature(rustc_private)] #![feature(staged_api)] -#![feature(question_mark)] +#![cfg_attr(stage0, feature(question_mark))] #![feature(range_contains)] #![feature(libc)] #![feature(unicode)] @@ -50,6 +50,7 @@ pub mod emitter; pub mod snippet; pub mod registry; pub mod styled_buffer; +mod lock; use syntax_pos::{BytePos, Loc, FileLinesResult, FileName, MultiSpan, Span, NO_EXPANSION }; use syntax_pos::{MacroBacktrace}; @@ -80,6 +81,7 @@ pub trait CodeMapper { fn span_to_string(&self, sp: Span) -> String; fn span_to_filename(&self, sp: Span) -> FileName; fn macro_backtrace(&self, span: Span) -> Vec; + fn merge_spans(&self, sp_lhs: Span, sp_rhs: Span) -> Option; } impl CodeSuggestion { @@ -272,10 +274,21 @@ impl<'a> DiagnosticBuilder<'a> { expected: &fmt::Display, found: &fmt::Display) -> &mut DiagnosticBuilder<'a> + { + self.note_expected_found_extra(label, expected, found, &"", &"") + } + + pub fn note_expected_found_extra(&mut self, + label: &fmt::Display, + expected: &fmt::Display, + found: &fmt::Display, + expected_extra: &fmt::Display, + found_extra: &fmt::Display) + -> &mut DiagnosticBuilder<'a> { // For now, just attach these as notes - self.note(&format!("expected {} `{}`", label, expected)); - self.note(&format!(" found {} `{}`", label, found)); + self.note(&format!("expected {} `{}`{}", label, expected, expected_extra)); + self.note(&format!(" found {} `{}`{}", label, found, found_extra)); self } @@ -564,6 +577,15 @@ impl Handler { self.bump_err_count(); self.panic_if_treat_err_as_bug(); } + pub fn mut_span_err<'a, S: Into>(&'a self, + sp: S, + msg: &str) + -> DiagnosticBuilder<'a> { + let mut result = DiagnosticBuilder::new(self, Level::Error, msg); + result.set_span(sp); + self.bump_err_count(); + result + } pub fn span_err_with_code>(&self, sp: S, msg: &str, code: &str) { self.emit_with_code(&sp.into(), msg, code, Error); self.bump_err_count(); @@ -722,7 +744,13 @@ impl Level { pub fn color(self) -> term::color::Color { match self { Bug | Fatal | PhaseFatal | Error => term::color::BRIGHT_RED, - Warning => term::color::YELLOW, + Warning => { + if cfg!(windows) { + term::color::BRIGHT_YELLOW + } else { + term::color::YELLOW + } + }, Note => term::color::BRIGHT_GREEN, Help => term::color::BRIGHT_CYAN, Cancelled => unreachable!(), @@ -748,4 +776,4 @@ pub fn expect(diag: &Handler, opt: Option, msg: M) -> T where Some(t) => t, None => diag.bug(&msg()), } -} \ No newline at end of file +} diff --git a/src/librustc_errors/lock.rs b/src/librustc_errors/lock.rs new file mode 100644 index 0000000000..0a9e0c4bbe --- /dev/null +++ b/src/librustc_errors/lock.rs @@ -0,0 +1,112 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Bindings to acquire a global named lock. +//! +//! This is intended to be used to synchronize multiple compiler processes to +//! ensure that we can output complete errors without interleaving on Windows. +//! Note that this is currently only needed for allowing only one 32-bit MSVC +//! linker to execute at once on MSVC hosts, so this is only implemented for +//! `cfg(windows)`. Also note that this may not always be used on Windows, +//! only when targeting 32-bit MSVC. +//! +//! 
For more information about why this is necessary, see where this is called. + +use std::any::Any; + +#[cfg(windows)] +#[allow(bad_style)] +pub fn acquire_global_lock(name: &str) -> Box { + use std::ffi::CString; + use std::io; + + type LPSECURITY_ATTRIBUTES = *mut u8; + type BOOL = i32; + type LPCSTR = *const u8; + type HANDLE = *mut u8; + type DWORD = u32; + + const INFINITE: DWORD = !0; + const WAIT_OBJECT_0: DWORD = 0; + const WAIT_ABANDONED: DWORD = 0x00000080; + + extern "system" { + fn CreateMutexA(lpMutexAttributes: LPSECURITY_ATTRIBUTES, + bInitialOwner: BOOL, + lpName: LPCSTR) -> HANDLE; + fn WaitForSingleObject(hHandle: HANDLE, + dwMilliseconds: DWORD) -> DWORD; + fn ReleaseMutex(hMutex: HANDLE) -> BOOL; + fn CloseHandle(hObject: HANDLE) -> BOOL; + } + + struct Handle(HANDLE); + + impl Drop for Handle { + fn drop(&mut self) { + unsafe { + CloseHandle(self.0); + } + } + } + + struct Guard(Handle); + + impl Drop for Guard { + fn drop(&mut self) { + unsafe { + ReleaseMutex((self.0).0); + } + } + } + + let cname = CString::new(name).unwrap(); + unsafe { + // Create a named mutex, with no security attributes and also not + // acquired when we create it. + // + // This will silently create one if it doesn't already exist, or it'll + // open up a handle to one if it already exists. + let mutex = CreateMutexA(0 as *mut _, 0, cname.as_ptr() as *const u8); + if mutex.is_null() { + panic!("failed to create global mutex named `{}`: {}", name, + io::Error::last_os_error()); + } + let mutex = Handle(mutex); + + // Acquire the lock through `WaitForSingleObject`. + // + // A return value of `WAIT_OBJECT_0` means we successfully acquired it. + // + // A return value of `WAIT_ABANDONED` means that the previous holder of + // the thread exited without calling `ReleaseMutex`. This can happen, + // for example, when the compiler crashes or is interrupted via ctrl-c + // or the like. In this case, however, we are still transferred + // ownership of the lock so we continue. + // + // If an error happens.. well... that's surprising! + match WaitForSingleObject(mutex.0, INFINITE) { + WAIT_OBJECT_0 | WAIT_ABANDONED => {} + code => { + panic!("WaitForSingleObject failed on global mutex named \ + `{}`: {} (ret={:x})", name, + io::Error::last_os_error(), code); + } + } + + // Return a guard which will call `ReleaseMutex` when dropped. + Box::new(Guard(mutex)) + } +} + +#[cfg(unix)] +pub fn acquire_global_lock(_name: &str) -> Box { + Box::new(()) +} diff --git a/src/librustc_incremental/Cargo.toml b/src/librustc_incremental/Cargo.toml index 7db1a6348b..e3ee752754 100644 --- a/src/librustc_incremental/Cargo.toml +++ b/src/librustc_incremental/Cargo.toml @@ -10,10 +10,9 @@ crate-type = ["dylib"] [dependencies] graphviz = { path = "../libgraphviz" } -rbml = { path = "../librbml" } rustc = { path = "../librustc" } rustc_data_structures = { path = "../librustc_data_structures" } serialize = { path = "../libserialize" } log = { path = "../liblog" } syntax = { path = "../libsyntax" } -syntax_pos = { path = "../libsyntax_pos" } \ No newline at end of file +syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/librustc_incremental/assert_dep_graph.rs b/src/librustc_incremental/assert_dep_graph.rs index 420c88e89b..28aab1fdd4 100644 --- a/src/librustc_incremental/assert_dep_graph.rs +++ b/src/librustc_incremental/assert_dep_graph.rs @@ -26,19 +26,20 @@ //! used to check when paths exist or do not. //! //! The full form of the `rustc_if_this_changed` annotation is -//! `#[rustc_if_this_changed(id)]`. 
The `"id"` is optional and -//! defaults to `"id"` if omitted. +//! `#[rustc_if_this_changed("foo")]`, which will report a +//! source node of `foo(def_id)`. The `"foo"` is optional and +//! defaults to `"Hir"` if omitted. //! //! Example: //! //! ``` -//! #[rustc_if_this_changed] +//! #[rustc_if_this_changed(Hir)] //! fn foo() { } //! -//! #[rustc_then_this_would_need("trans")] //~ ERROR no path from `foo` +//! #[rustc_then_this_would_need(trans)] //~ ERROR no path from `foo` //! fn bar() { } //! -//! #[rustc_then_this_would_need("trans")] //~ ERROR OK +//! #[rustc_then_this_would_need(trans)] //~ ERROR OK //! fn baz() { foo(); } //! ``` @@ -47,7 +48,7 @@ use rustc::dep_graph::{DepGraphQuery, DepNode}; use rustc::dep_graph::debug::{DepNodeFilter, EdgeFilter}; use rustc::hir::def_id::DefId; use rustc::ty::TyCtxt; -use rustc_data_structures::fnv::{FnvHashMap, FnvHashSet}; +use rustc_data_structures::fnv::FnvHashSet; use rustc_data_structures::graph::{Direction, INCOMING, OUTGOING, NodeIndex}; use rustc::hir; use rustc::hir::intravisit::Visitor; @@ -56,13 +57,9 @@ use std::env; use std::fs::File; use std::io::Write; use syntax::ast; -use syntax::attr::AttrMetaMethods; use syntax::parse::token::InternedString; use syntax_pos::Span; - -const IF_THIS_CHANGED: &'static str = "rustc_if_this_changed"; -const THEN_THIS_WOULD_NEED: &'static str = "rustc_then_this_would_need"; -const ID: &'static str = "id"; +use {ATTR_IF_THIS_CHANGED, ATTR_THEN_THIS_WOULD_NEED}; pub fn assert_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { let _ignore = tcx.dep_graph.in_ignore(); @@ -81,8 +78,9 @@ pub fn assert_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { // Find annotations supplied by user (if any). let (if_this_changed, then_this_would_need) = { let mut visitor = IfThisChanged { tcx: tcx, - if_this_changed: FnvHashMap(), - then_this_would_need: FnvHashMap() }; + if_this_changed: vec![], + then_this_would_need: vec![] }; + visitor.process_attrs(ast::CRATE_NODE_ID, &tcx.map.krate().attrs); tcx.map.krate().visit_all_items(&mut visitor); (visitor.if_this_changed, visitor.then_this_would_need) }; @@ -91,56 +89,58 @@ pub fn assert_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { assert!(tcx.sess.opts.debugging_opts.query_dep_graph, "cannot use the `#[{}]` or `#[{}]` annotations \ without supplying `-Z query-dep-graph`", - IF_THIS_CHANGED, THEN_THIS_WOULD_NEED); + ATTR_IF_THIS_CHANGED, ATTR_THEN_THIS_WOULD_NEED); } // Check paths. 
check_paths(tcx, &if_this_changed, &then_this_would_need); } -type SourceHashMap = - FnvHashMap)>>; -type TargetHashMap = - FnvHashMap)>>; +type Sources = Vec<(Span, DefId, DepNode)>; +type Targets = Vec<(Span, InternedString, ast::NodeId, DepNode)>; struct IfThisChanged<'a, 'tcx:'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, - if_this_changed: SourceHashMap, - then_this_would_need: TargetHashMap, + if_this_changed: Sources, + then_this_would_need: Targets, } impl<'a, 'tcx> IfThisChanged<'a, 'tcx> { - fn process_attrs(&mut self, node_id: ast::NodeId, def_id: DefId) { - for attr in self.tcx.get_attrs(def_id).iter() { - if attr.check_name(IF_THIS_CHANGED) { - let mut id = None; - for meta_item in attr.meta_item_list().unwrap_or_default() { - if meta_item.is_word() && id.is_none() { - id = Some(meta_item.name().clone()); - } else { - // FIXME better-encapsulate meta_item (don't directly access `node`) - span_bug!(meta_item.span(), "unexpected meta-item {:?}", meta_item.node) - } - } - let id = id.unwrap_or(InternedString::new(ID)); - self.if_this_changed.entry(id) - .or_insert(FnvHashSet()) - .insert((attr.span, def_id, DepNode::Hir(def_id))); - } else if attr.check_name(THEN_THIS_WOULD_NEED) { - let mut dep_node_interned = None; - let mut id = None; - for meta_item in attr.meta_item_list().unwrap_or_default() { - if meta_item.is_word() && dep_node_interned.is_none() { - dep_node_interned = Some(meta_item.name().clone()); - } else if meta_item.is_word() && id.is_none() { - id = Some(meta_item.name().clone()); - } else { - // FIXME better-encapsulate meta_item (don't directly access `node`) - span_bug!(meta_item.span(), "unexpected meta-item {:?}", meta_item.node) + fn argument(&self, attr: &ast::Attribute) -> Option { + let mut value = None; + for list_item in attr.meta_item_list().unwrap_or_default() { + match list_item.word() { + Some(word) if value.is_none() => + value = Some(word.name().clone()), + _ => + // FIXME better-encapsulate meta_item (don't directly access `node`) + span_bug!(list_item.span(), "unexpected meta-item {:?}", list_item.node), + } + } + value + } + + fn process_attrs(&mut self, node_id: ast::NodeId, attrs: &[ast::Attribute]) { + let def_id = self.tcx.map.local_def_id(node_id); + for attr in attrs { + if attr.check_name(ATTR_IF_THIS_CHANGED) { + let dep_node_interned = self.argument(attr); + let dep_node = match dep_node_interned { + None => DepNode::Hir(def_id), + Some(ref n) => { + match DepNode::from_label_string(&n[..], def_id) { + Ok(n) => n, + Err(()) => { + self.tcx.sess.span_fatal( + attr.span, + &format!("unrecognized DepNode variant {:?}", n)); + } + } } - } + }; + self.if_this_changed.push((attr.span, def_id, dep_node)); + } else if attr.check_name(ATTR_THEN_THIS_WOULD_NEED) { + let dep_node_interned = self.argument(attr); let dep_node = match dep_node_interned { Some(ref n) => { match DepNode::from_label_string(&n[..], def_id) { @@ -158,11 +158,10 @@ impl<'a, 'tcx> IfThisChanged<'a, 'tcx> { &format!("missing DepNode variant")); } }; - let id = id.unwrap_or(InternedString::new(ID)); - self.then_this_would_need - .entry(id) - .or_insert(FnvHashSet()) - .insert((attr.span, dep_node_interned.clone().unwrap(), node_id, dep_node)); + self.then_this_would_need.push((attr.span, + dep_node_interned.clone().unwrap(), + node_id, + dep_node)); } } } @@ -170,47 +169,38 @@ impl<'a, 'tcx> IfThisChanged<'a, 'tcx> { impl<'a, 'tcx> Visitor<'tcx> for IfThisChanged<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx hir::Item) { - let def_id = self.tcx.map.local_def_id(item.id); - 
self.process_attrs(item.id, def_id); + self.process_attrs(item.id, &item.attrs); } } fn check_paths<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - if_this_changed: &SourceHashMap, - then_this_would_need: &TargetHashMap) + if_this_changed: &Sources, + then_this_would_need: &Targets) { // Return early here so as not to construct the query, which is not cheap. if if_this_changed.is_empty() { + for &(target_span, _, _, _) in then_this_would_need { + tcx.sess.span_err( + target_span, + &format!("no #[rustc_if_this_changed] annotation detected")); + + } return; } let query = tcx.dep_graph.query(); - for (id, sources) in if_this_changed { - let targets = match then_this_would_need.get(id) { - Some(targets) => targets, - None => { - for &(source_span, _, _) in sources.iter().take(1) { - tcx.sess.span_err( - source_span, - &format!("no targets for id `{}`", id)); - } - continue; - } - }; - - for &(_, source_def_id, ref source_dep_node) in sources { - let dependents = query.transitive_successors(source_dep_node); - for &(target_span, ref target_pass, _, ref target_dep_node) in targets { - if !dependents.contains(&target_dep_node) { - tcx.sess.span_err( - target_span, - &format!("no path from `{}` to `{}`", - tcx.item_path_str(source_def_id), - target_pass)); - } else { - tcx.sess.span_err( - target_span, - &format!("OK")); - } + for &(_, source_def_id, ref source_dep_node) in if_this_changed { + let dependents = query.transitive_successors(source_dep_node); + for &(target_span, ref target_pass, _, ref target_dep_node) in then_this_would_need { + if !dependents.contains(&target_dep_node) { + tcx.sess.span_err( + target_span, + &format!("no path from `{}` to `{}`", + tcx.item_path_str(source_def_id), + target_pass)); + } else { + tcx.sess.span_err( + target_span, + &format!("OK")); } } } diff --git a/src/librustc_incremental/calculate_svh/caching_codemap_view.rs b/src/librustc_incremental/calculate_svh/caching_codemap_view.rs new file mode 100644 index 0000000000..ad9c48420e --- /dev/null +++ b/src/librustc_incremental/calculate_svh/caching_codemap_view.rs @@ -0,0 +1,115 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
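With sources and targets now plain vectors instead of maps keyed by an `"id"` string, the rewritten `check_paths` simply tests every `#[rustc_if_this_changed]` source against every `#[rustc_then_this_would_need]` target. A minimal, self-contained sketch of that cross-check, where plain strings and a precomputed successor set stand in for `DepNode` and the dep-graph query (both stand-ins are illustrative assumptions, not the compiler's types):

```rust
use std::collections::HashSet;

/// For every (source, target) pair, report whether `target` is reachable from
/// `source` in a precomputed, transitively closed successor relation.
fn check_paths(sources: &[&str], targets: &[&str], successors: &HashSet<(&str, &str)>) {
    if sources.is_empty() {
        // Mirrors the new early-out: targets without any source annotation are errors.
        for target in targets {
            println!("error: no source annotation detected for `{}`", target);
        }
        return;
    }
    for source in sources {
        for target in targets {
            if successors.contains(&(*source, *target)) {
                println!("OK: path from `{}` to `{}`", source, target);
            } else {
                println!("error: no path from `{}` to `{}`", source, target);
            }
        }
    }
}

fn main() {
    let successors: HashSet<(&str, &str)> = [("foo", "baz")].iter().cloned().collect();
    // As in the doc-comment example: `bar` has no path from `foo`, `baz` does.
    check_paths(&["foo"], &["bar", "baz"], &successors);
}
```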
+ +use rustc::ty::TyCtxt; +use std::rc::Rc; +use syntax::codemap::CodeMap; +use syntax_pos::{BytePos, FileMap}; + +#[derive(Clone)] +struct CacheEntry { + time_stamp: usize, + line_number: usize, + line_start: BytePos, + line_end: BytePos, + file: Rc, +} + +pub struct CachingCodemapView<'tcx> { + codemap: &'tcx CodeMap, + line_cache: [CacheEntry; 3], + time_stamp: usize, +} + +impl<'tcx> CachingCodemapView<'tcx> { + pub fn new<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> CachingCodemapView<'tcx> { + let codemap = tcx.sess.codemap(); + let first_file = codemap.files.borrow()[0].clone(); + let entry = CacheEntry { + time_stamp: 0, + line_number: 0, + line_start: BytePos(0), + line_end: BytePos(0), + file: first_file, + }; + + CachingCodemapView { + codemap: codemap, + line_cache: [entry.clone(), entry.clone(), entry.clone()], + time_stamp: 0, + } + } + + pub fn codemap(&self) -> &'tcx CodeMap { + self.codemap + } + + pub fn byte_pos_to_line_and_col(&mut self, + pos: BytePos) + -> Option<(Rc, usize, BytePos)> { + self.time_stamp += 1; + + // Check if the position is in one of the cached lines + for cache_entry in self.line_cache.iter_mut() { + if pos >= cache_entry.line_start && pos < cache_entry.line_end { + cache_entry.time_stamp = self.time_stamp; + return Some((cache_entry.file.clone(), + cache_entry.line_number, + pos - cache_entry.line_start)); + } + } + + // No cache hit ... + let mut oldest = 0; + for index in 1 .. self.line_cache.len() { + if self.line_cache[index].time_stamp < self.line_cache[oldest].time_stamp { + oldest = index; + } + } + + let cache_entry = &mut self.line_cache[oldest]; + + // If the entry doesn't point to the correct file, fix it up + if pos < cache_entry.file.start_pos || pos >= cache_entry.file.end_pos { + let file_valid; + let files = self.codemap.files.borrow(); + + if files.len() > 0 { + let file_index = self.codemap.lookup_filemap_idx(pos); + let file = files[file_index].clone(); + + if pos >= file.start_pos && pos < file.end_pos { + cache_entry.file = file; + file_valid = true; + } else { + file_valid = false; + } + } else { + file_valid = false; + } + + if !file_valid { + return None; + } + } + + let line_index = cache_entry.file.lookup_line(pos).unwrap(); + let line_bounds = cache_entry.file.line_bounds(line_index); + + cache_entry.line_number = line_index + 1; + cache_entry.line_start = line_bounds.0; + cache_entry.line_end = line_bounds.1; + cache_entry.time_stamp = self.time_stamp; + + return Some((cache_entry.file.clone(), + cache_entry.line_number, + pos - cache_entry.line_start)); + } +} diff --git a/src/librustc_incremental/calculate_svh/def_path_hash.rs b/src/librustc_incremental/calculate_svh/def_path_hash.rs new file mode 100644 index 0000000000..8aa134ba3b --- /dev/null +++ b/src/librustc_incremental/calculate_svh/def_path_hash.rs @@ -0,0 +1,36 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
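`CachingCodemapView` above keeps just three cache entries and, on a miss, evicts the one with the oldest time stamp before refilling it from the `CodeMap`. The same shape in a self-contained form, with a sorted list of line-start offsets playing the role of the codemap (all names here are illustrative, not the compiler's API):

```rust
struct LineCacheEntry {
    time_stamp: usize,
    line_number: usize, // 1-based, like the real cache
    line_start: usize,
    line_end: usize,
}

struct LineCache<'a> {
    line_starts: &'a [usize], // sorted byte offsets where lines begin
    text_len: usize,
    entries: [LineCacheEntry; 3],
    time_stamp: usize,
}

impl<'a> LineCache<'a> {
    fn new(line_starts: &'a [usize], text_len: usize) -> Self {
        let entry = || LineCacheEntry { time_stamp: 0, line_number: 0, line_start: 0, line_end: 0 };
        LineCache { line_starts, text_len, entries: [entry(), entry(), entry()], time_stamp: 0 }
    }

    /// Map a byte offset to (line number, column), reusing a cached line if possible.
    fn line_and_col(&mut self, pos: usize) -> (usize, usize) {
        self.time_stamp += 1;

        // Cache hit: the position falls inside one of the cached lines.
        for e in self.entries.iter_mut() {
            if pos >= e.line_start && pos < e.line_end {
                e.time_stamp = self.time_stamp;
                return (e.line_number, pos - e.line_start);
            }
        }

        // Cache miss: evict the least recently used entry and recompute.
        let oldest = (0..self.entries.len())
            .min_by_key(|&i| self.entries[i].time_stamp)
            .unwrap();
        let line_index = match self.line_starts.binary_search(&pos) {
            Ok(i) => i,
            Err(i) => i - 1,
        };
        let e = &mut self.entries[oldest];
        e.line_number = line_index + 1;
        e.line_start = self.line_starts[line_index];
        e.line_end = self.line_starts.get(line_index + 1).copied().unwrap_or(self.text_len);
        e.time_stamp = self.time_stamp;
        (e.line_number, pos - e.line_start)
    }
}

fn main() {
    let text = "fn main() {\n    println!(\"hi\");\n}\n";
    let line_starts: Vec<usize> =
        std::iter::once(0).chain(text.match_indices('\n').map(|(i, _)| i + 1)).collect();
    let mut cache = LineCache::new(&line_starts, text.len());
    assert_eq!(cache.line_and_col(0), (1, 0));  // first byte: line 1, column 0
    assert_eq!(cache.line_and_col(16), (2, 4)); // the `p` of `println!`: line 2, column 4
}
```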
+ +use rustc::hir::def_id::DefId; +use rustc::ty::TyCtxt; +use rustc::util::nodemap::DefIdMap; + +pub struct DefPathHashes<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + data: DefIdMap, +} + +impl<'a, 'tcx> DefPathHashes<'a, 'tcx> { + pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self { + DefPathHashes { + tcx: tcx, + data: DefIdMap() + } + } + + pub fn hash(&mut self, def_id: DefId) -> u64 { + let tcx = self.tcx; + *self.data.entry(def_id) + .or_insert_with(|| { + let def_path = tcx.def_path(def_id); + def_path.deterministic_hash(tcx) + }) + } +} diff --git a/src/librustc_incremental/calculate_svh/mod.rs b/src/librustc_incremental/calculate_svh/mod.rs index d7caf8c882..a22b51ac04 100644 --- a/src/librustc_incremental/calculate_svh/mod.rs +++ b/src/librustc_incremental/calculate_svh/mod.rs @@ -8,106 +8,193 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Calculation of a Strict Version Hash for crates. For a length -//! comment explaining the general idea, see `librustc/middle/svh.rs`. - -use syntax::attr::AttributeMethods; -use std::hash::{Hash, SipHasher, Hasher}; +//! Calculation of the (misnamed) "strict version hash" for crates and +//! items. This hash is used to tell when the HIR changed in such a +//! way that results from previous compilations may no longer be +//! applicable and hence must be recomputed. It should probably be +//! renamed to the ICH (incremental compilation hash). +//! +//! The hashes for all items are computed once at the beginning of +//! compilation and stored into a map. In addition, a hash is computed +//! of the **entire crate**. +//! +//! Storing the hashes in a map avoids the need to compute them twice +//! (once when loading prior incremental results and once when +//! saving), but it is also important for correctness: at least as of +//! the time of this writing, the typeck passes rewrites entries in +//! the dep-map in-place to accommodate UFCS resolutions. Since name +//! resolution is part of the hash, the result is that hashes computed +//! at the end of compilation would be different from those computed +//! at the beginning. + +use syntax::ast; +use std::cell::RefCell; +use std::hash::{Hash, Hasher}; +use std::collections::hash_map::DefaultHasher; +use rustc::dep_graph::DepNode; +use rustc::hir; use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId}; -use rustc::hir::map::{NodeItem, NodeForeignItem}; -use rustc::hir::svh::Svh; +use rustc::hir::intravisit as visit; use rustc::ty::TyCtxt; -use rustc::hir::intravisit::{self, Visitor}; +use rustc_data_structures::fnv::FnvHashMap; +use rustc::util::common::record_time; +use rustc::session::config::DebugInfoLevel::NoDebugInfo; +use self::def_path_hash::DefPathHashes; use self::svh_visitor::StrictVersionHashVisitor; +use self::caching_codemap_view::CachingCodemapView; +mod def_path_hash; mod svh_visitor; +mod caching_codemap_view; -pub trait SvhCalculate { - /// Calculate the SVH for an entire krate. - fn calculate_krate_hash(self) -> Svh; +pub struct IncrementalHashesMap { + hashes: FnvHashMap, u64>, - /// Calculate the SVH for a particular item. - fn calculate_item_hash(self, def_id: DefId) -> u64; + // These are the metadata hashes for the current crate as they were stored + // during the last compilation session. 
They are only loaded if + // -Z query-dep-graph was specified and are needed for auto-tests using + // the #[rustc_metadata_dirty] and #[rustc_metadata_clean] attributes to + // check whether some metadata hash has changed in between two revisions. + pub prev_metadata_hashes: RefCell>, } -impl<'a, 'tcx> SvhCalculate for TyCtxt<'a, 'tcx, 'tcx> { - fn calculate_krate_hash(self) -> Svh { - // FIXME (#14132): This is better than it used to be, but it still not - // ideal. We now attempt to hash only the relevant portions of the - // Crate AST as well as the top-level crate attributes. (However, - // the hashing of the crate attributes should be double-checked - // to ensure it is not incorporating implementation artifacts into - // the hash that are not otherwise visible.) +impl IncrementalHashesMap { + pub fn new() -> IncrementalHashesMap { + IncrementalHashesMap { + hashes: FnvHashMap(), + prev_metadata_hashes: RefCell::new(FnvHashMap()), + } + } - let crate_disambiguator = self.sess.local_crate_disambiguator(); - let krate = self.map.krate(); + pub fn insert(&mut self, k: DepNode, v: u64) -> Option { + self.hashes.insert(k, v) + } - // FIXME: this should use SHA1, not SipHash. SipHash is not built to - // avoid collisions. - let mut state = SipHasher::new(); - debug!("state: {:?}", state); + pub fn iter<'a>(&'a self) -> ::std::collections::hash_map::Iter<'a, DepNode, u64> { + self.hashes.iter() + } +} - // FIXME(#32753) -- at (*) we `to_le` for endianness, but is - // this enough, and does it matter anyway? - "crate_disambiguator".hash(&mut state); - crate_disambiguator.len().to_le().hash(&mut state); // (*) - crate_disambiguator.hash(&mut state); +impl<'a> ::std::ops::Index<&'a DepNode> for IncrementalHashesMap { + type Output = u64; - debug!("crate_disambiguator: {:?}", crate_disambiguator); - debug!("state: {:?}", state); + fn index(&self, index: &'a DepNode) -> &u64 { + &self.hashes[index] + } +} - { - let mut visit = StrictVersionHashVisitor::new(&mut state, self); - krate.visit_all_items(&mut visit); - } - // FIXME (#14132): This hash is still sensitive to e.g. the - // spans of the crate Attributes and their underlying - // MetaItems; we should make ContentHashable impl for those - // types and then use hash_content. But, since all crate - // attributes should appear near beginning of the file, it is - // not such a big deal to be sensitive to their spans for now. 
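Two small building blocks above are worth isolating: `DefPathHashes` memoizes a deterministic hash per `DefId` with `entry().or_insert_with()`, and `IncrementalHashesMap` is essentially a map from dep-node to `u64` with an `Index` impl on top. A stand-alone sketch of the same shape, with a `u32` id and a string key as illustrative stand-ins for `DefId` and `DepNode` (the `expensive_stable_hash` helper is hypothetical):

```rust
use std::collections::hash_map::{DefaultHasher, HashMap};
use std::hash::{Hash, Hasher};

/// Stand-in for hashing a def-path deterministically; computed at most once per id.
fn expensive_stable_hash(id: u32) -> u64 {
    let mut hasher = DefaultHasher::new();
    ("some/def/path", id).hash(&mut hasher);
    hasher.finish()
}

/// Memoizes the stable hash per id, mirroring `DefPathHashes::hash`.
struct StableIdHashes {
    data: HashMap<u32, u64>,
}

impl StableIdHashes {
    fn hash(&mut self, id: u32) -> u64 {
        *self.data.entry(id).or_insert_with(|| expensive_stable_hash(id))
    }
}

/// A map from "dep node" keys to item hashes, indexable like `IncrementalHashesMap`.
struct HashesMap {
    hashes: HashMap<String, u64>,
}

impl<'a> std::ops::Index<&'a str> for HashesMap {
    type Output = u64;
    fn index(&self, key: &'a str) -> &u64 {
        &self.hashes[key]
    }
}

fn main() {
    let mut ids = StableIdHashes { data: HashMap::new() };
    let first = ids.hash(7);
    assert_eq!(first, ids.hash(7)); // the second lookup is served from the memo table

    let mut map = HashesMap { hashes: HashMap::new() };
    map.hashes.insert("Hir(foo)".to_string(), first);
    assert_eq!(map["Hir(foo)"], first);
}
```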
- // - // We hash only the MetaItems instead of the entire Attribute - // to avoid hashing the AttrId - for attr in &krate.attrs { - debug!("krate attr {:?}", attr); - attr.meta().hash(&mut state); - } +pub fn compute_incremental_hashes_map<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> IncrementalHashesMap { + let _ignore = tcx.dep_graph.in_ignore(); + let krate = tcx.map.krate(); + let hash_spans = tcx.sess.opts.debuginfo != NoDebugInfo; + let mut visitor = HashItemsVisitor { + tcx: tcx, + hashes: IncrementalHashesMap::new(), + def_path_hashes: DefPathHashes::new(tcx), + codemap: CachingCodemapView::new(tcx), + hash_spans: hash_spans, + }; + record_time(&tcx.sess.perf_stats.incr_comp_hashes_time, || { + visitor.calculate_def_id(DefId::local(CRATE_DEF_INDEX), + |v| visit::walk_crate(v, krate)); + krate.visit_all_items(&mut visitor); + }); + record_time(&tcx.sess.perf_stats.svh_time, || visitor.compute_crate_hash()); + visitor.hashes +} - Svh::new(state.finish()) +struct HashItemsVisitor<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_path_hashes: DefPathHashes<'a, 'tcx>, + codemap: CachingCodemapView<'tcx>, + hashes: IncrementalHashesMap, + hash_spans: bool, +} + +impl<'a, 'tcx> HashItemsVisitor<'a, 'tcx> { + fn calculate_node_id(&mut self, id: ast::NodeId, walk_op: W) + where W: for<'v> FnMut(&mut StrictVersionHashVisitor<'v, 'a, 'tcx>) + { + let def_id = self.tcx.map.local_def_id(id); + self.calculate_def_id(def_id, walk_op) } - fn calculate_item_hash(self, def_id: DefId) -> u64 { + fn calculate_def_id(&mut self, def_id: DefId, mut walk_op: W) + where W: for<'v> FnMut(&mut StrictVersionHashVisitor<'v, 'a, 'tcx>) + { assert!(def_id.is_local()); + debug!("HashItemsVisitor::calculate(def_id={:?})", def_id); + // FIXME: this should use SHA1, not DefaultHasher. DefaultHasher is not + // built to avoid collisions. + let mut state = DefaultHasher::new(); + walk_op(&mut StrictVersionHashVisitor::new(&mut state, + self.tcx, + &mut self.def_path_hashes, + &mut self.codemap, + self.hash_spans)); + let item_hash = state.finish(); + self.hashes.insert(DepNode::Hir(def_id), item_hash); + debug!("calculate_item_hash: def_id={:?} hash={:?}", def_id, item_hash); + } - debug!("calculate_item_hash(def_id={:?})", def_id); + fn compute_crate_hash(&mut self) { + let krate = self.tcx.map.krate(); - let mut state = SipHasher::new(); + let mut crate_state = DefaultHasher::new(); + + let crate_disambiguator = self.tcx.sess.local_crate_disambiguator(); + "crate_disambiguator".hash(&mut crate_state); + crate_disambiguator.len().hash(&mut crate_state); + crate_disambiguator.hash(&mut crate_state); + + // add each item (in some deterministic order) to the overall + // crate hash. 
+ { + let def_path_hashes = &mut self.def_path_hashes; + let mut item_hashes: Vec<_> = + self.hashes.iter() + .map(|(item_dep_node, &item_hash)| { + // convert from a DepNode tp a + // DepNode where the u64 is the + // hash of the def-id's def-path: + let item_dep_node = + item_dep_node.map_def(|&did| Some(def_path_hashes.hash(did))) + .unwrap(); + (item_dep_node, item_hash) + }) + .collect(); + item_hashes.sort(); // avoid artificial dependencies on item ordering + item_hashes.hash(&mut crate_state); + } { - let mut visit = StrictVersionHashVisitor::new(&mut state, self); - if def_id.index == CRATE_DEF_INDEX { - // the crate root itself is not registered in the map - // as an item, so we have to fetch it this way - let krate = self.map.krate(); - intravisit::walk_crate(&mut visit, krate); - } else { - let node_id = self.map.as_local_node_id(def_id).unwrap(); - match self.map.find(node_id) { - Some(NodeItem(item)) => visit.visit_item(item), - Some(NodeForeignItem(item)) => visit.visit_foreign_item(item), - r => bug!("calculate_item_hash: expected an item for node {} not {:?}", - node_id, r), - } - } + let mut visitor = StrictVersionHashVisitor::new(&mut crate_state, + self.tcx, + &mut self.def_path_hashes, + &mut self.codemap, + self.hash_spans); + visitor.hash_attributes(&krate.attrs); } - let hash = state.finish(); + let crate_hash = crate_state.finish(); + self.hashes.insert(DepNode::Krate, crate_hash); + debug!("calculate_crate_hash: crate_hash={:?}", crate_hash); + } +} + - debug!("calculate_item_hash: def_id={:?} hash={:?}", def_id, hash); +impl<'a, 'tcx> visit::Visitor<'tcx> for HashItemsVisitor<'a, 'tcx> { + fn visit_item(&mut self, item: &'tcx hir::Item) { + self.calculate_node_id(item.id, |v| v.visit_item(item)); + visit::walk_item(self, item); + } - hash + fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem) { + self.calculate_node_id(item.id, |v| v.visit_foreign_item(item)); + visit::walk_foreign_item(self, item); } } + diff --git a/src/librustc_incremental/calculate_svh/svh_visitor.rs b/src/librustc_incremental/calculate_svh/svh_visitor.rs index 42e7abeeac..0cd5ae6845 100644 --- a/src/librustc_incremental/calculate_svh/svh_visitor.rs +++ b/src/librustc_incremental/calculate_svh/svh_visitor.rs @@ -13,37 +13,112 @@ // hash computation, but for many kinds of items the order of // declaration should be irrelevant to the ABI. 
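The crate hash computed above deliberately does not depend on the order in which items were visited: the per-item `(stable key, hash)` pairs are collected, sorted, and only then fed to the hasher. A self-contained illustration of just that trick, with plain strings in place of the def-path-hashed dep-nodes (names are illustrative):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Combine per-item hashes into one crate hash, independent of visit order.
fn crate_hash(disambiguator: &str, items: &[(&str, u64)]) -> u64 {
    let mut state = DefaultHasher::new();
    "crate_disambiguator".hash(&mut state);
    disambiguator.len().hash(&mut state);
    disambiguator.hash(&mut state);

    // Sort by the stable per-item key to avoid artificial dependencies on
    // item ordering, just like `item_hashes.sort()` above.
    let mut item_hashes: Vec<(&str, u64)> = items.to_vec();
    item_hashes.sort();
    item_hashes.hash(&mut state);

    state.finish()
}

fn main() {
    let a = crate_hash("abc123", &[("Hir(foo)", 1), ("Hir(bar)", 2)]);
    let b = crate_hash("abc123", &[("Hir(bar)", 2), ("Hir(foo)", 1)]);
    assert_eq!(a, b); // visiting items in a different order leaves the crate hash unchanged
}
```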
-pub use self::SawExprComponent::*; -pub use self::SawStmtComponent::*; +use self::SawExprComponent::*; use self::SawAbiComponent::*; use syntax::ast::{self, Name, NodeId}; use syntax::parse::token; -use syntax_pos::Span; +use syntax_pos::{Span, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos}; use rustc::hir; use rustc::hir::*; use rustc::hir::def::{Def, PathResolution}; use rustc::hir::def_id::DefId; use rustc::hir::intravisit as visit; -use rustc::hir::intravisit::{Visitor, FnKind}; -use rustc::hir::map::DefPath; use rustc::ty::TyCtxt; - -use std::hash::{Hash, SipHasher}; - -pub struct StrictVersionHashVisitor<'a, 'tcx: 'a> { - pub tcx: TyCtxt<'a, 'tcx, 'tcx>, - pub st: &'a mut SipHasher, +use rustc_data_structures::fnv; +use std::hash::Hash; +use std::collections::hash_map::DefaultHasher; + +use super::def_path_hash::DefPathHashes; +use super::caching_codemap_view::CachingCodemapView; + +const IGNORED_ATTRIBUTES: &'static [&'static str] = &[ + "cfg", + ::ATTR_IF_THIS_CHANGED, + ::ATTR_THEN_THIS_WOULD_NEED, + ::ATTR_DIRTY, + ::ATTR_CLEAN, + ::ATTR_DIRTY_METADATA, + ::ATTR_CLEAN_METADATA +]; + +pub struct StrictVersionHashVisitor<'a, 'hash: 'a, 'tcx: 'hash> { + pub tcx: TyCtxt<'hash, 'tcx, 'tcx>, + pub st: &'a mut DefaultHasher, + // collect a deterministic hash of def-ids that we have seen + def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>, + hash_spans: bool, + codemap: &'a mut CachingCodemapView<'tcx>, } -impl<'a, 'tcx> StrictVersionHashVisitor<'a, 'tcx> { - pub fn new(st: &'a mut SipHasher, - tcx: TyCtxt<'a, 'tcx, 'tcx>) +impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> { + pub fn new(st: &'a mut DefaultHasher, + tcx: TyCtxt<'hash, 'tcx, 'tcx>, + def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>, + codemap: &'a mut CachingCodemapView<'tcx>, + hash_spans: bool) -> Self { - StrictVersionHashVisitor { st: st, tcx: tcx } + StrictVersionHashVisitor { + st: st, + tcx: tcx, + def_path_hashes: def_path_hashes, + hash_spans: hash_spans, + codemap: codemap, + } + } + + fn compute_def_id_hash(&mut self, def_id: DefId) -> u64 { + self.def_path_hashes.hash(def_id) } - fn hash_def_path(&mut self, path: &DefPath) { - path.deterministic_hash_to(self.tcx, self.st); + // Hash a span in a stable way. We can't directly hash the span's BytePos + // fields (that would be similar to hashing pointers, since those are just + // offsets into the CodeMap). Instead, we hash the (file name, line, column) + // triple, which stays the same even if the containing FileMap has moved + // within the CodeMap. + // Also note that we are hashing byte offsets for the column, not unicode + // codepoint offsets. For the purpose of the hash that's sufficient. + fn hash_span(&mut self, span: Span) { + debug_assert!(self.hash_spans); + debug!("hash_span: st={:?}", self.st); + + // If this is not an empty or invalid span, we want to hash the last + // position that belongs to it, as opposed to hashing the first + // position past it. + let span_hi = if span.hi > span.lo { + // We might end up in the middle of a multibyte character here, + // but that's OK, since we are not trying to decode anything at + // this position. 
+ span.hi - BytePos(1) + } else { + span.hi + }; + + let loc1 = self.codemap.byte_pos_to_line_and_col(span.lo); + let loc2 = self.codemap.byte_pos_to_line_and_col(span_hi); + + let expansion_kind = match span.expn_id { + NO_EXPANSION => SawSpanExpnKind::NoExpansion, + COMMAND_LINE_EXPN => SawSpanExpnKind::CommandLine, + _ => SawSpanExpnKind::SomeExpansion, + }; + + SawSpan(loc1.as_ref().map(|&(ref fm, line, col)| (&fm.name[..], line, col)), + loc2.as_ref().map(|&(ref fm, line, col)| (&fm.name[..], line, col)), + expansion_kind) + .hash(self.st); + + if expansion_kind == SawSpanExpnKind::SomeExpansion { + let call_site = self.codemap.codemap().source_callsite(span); + self.hash_span(call_site); + } + } + + fn hash_discriminant(&mut self, v: &T) { + unsafe { + let disr = ::std::intrinsics::discriminant_value(v); + debug!("hash_discriminant: disr={}, st={:?}", disr, self.st); + disr.hash(self.st); + } } } @@ -75,26 +150,38 @@ enum SawAbiComponent<'a> { SawIdent(token::InternedString), SawStructDef(token::InternedString), - SawLifetime(token::InternedString), - SawLifetimeDef(token::InternedString), + SawLifetime, + SawLifetimeDef(usize), SawMod, SawForeignItem, SawItem, SawTy, SawGenerics, - SawFn, SawTraitItem, SawImplItem, SawStructField, SawVariant, - SawPath, + SawPath(bool), + SawPathSegment, + SawPathParameters, + SawPathListItem, SawBlock, SawPat, SawLocal, SawArm, SawExpr(SawExprComponent<'a>), - SawStmt(SawStmtComponent), + SawStmt, + SawVis, + SawWherePredicate, + SawTyParamBound, + SawPolyTraitRef, + SawAssocTypeBinding, + SawAttribute(ast::AttrStyle), + SawMacroDef, + SawSpan(Option<(&'a str, usize, BytePos)>, + Option<(&'a str, usize, BytePos)>, + SawSpanExpnKind), } /// SawExprComponent carries all of the information that we want @@ -112,7 +199,7 @@ enum SawAbiComponent<'a> { /// guarantee of collision-freedom, hash collisions are just /// (hopefully) unlikely.) #[derive(Hash)] -pub enum SawExprComponent<'a> { +enum SawExprComponent<'a> { SawExprLoop(Option), SawExprField(token::InternedString), @@ -133,7 +220,7 @@ pub enum SawExprComponent<'a> { SawExprIf, SawExprWhile, SawExprMatch, - SawExprClosure, + SawExprClosure(CaptureClause), SawExprBlock, SawExprAssign, SawExprAssignOp(hir::BinOp_), @@ -153,7 +240,7 @@ fn saw_expr<'a>(node: &'a Expr_) -> SawExprComponent<'a> { ExprCall(..) => SawExprCall, ExprMethodCall(..) => SawExprMethodCall, ExprTup(..) => SawExprTup, - ExprBinary(op, _, _) => SawExprBinary(op.node), + ExprBinary(op, ..) => SawExprBinary(op.node), ExprUnary(op, _) => SawExprUnary(op), ExprLit(ref lit) => SawExprLit(lit.node.clone()), ExprCast(..) => SawExprCast, @@ -162,10 +249,10 @@ fn saw_expr<'a>(node: &'a Expr_) -> SawExprComponent<'a> { ExprWhile(..) => SawExprWhile, ExprLoop(_, id) => SawExprLoop(id.map(|id| id.node.as_str())), ExprMatch(..) => SawExprMatch, - ExprClosure(..) => SawExprClosure, + ExprClosure(cc, _, _, _) => SawExprClosure(cc), ExprBlock(..) => SawExprBlock, ExprAssign(..) => SawExprAssign, - ExprAssignOp(op, _, _) => SawExprAssignOp(op.node), + ExprAssignOp(op, ..) => SawExprAssignOp(op.node), ExprField(_, name) => SawExprField(name.node.as_str()), ExprTupField(_, id) => SawExprTupField(id.node), ExprIndex(..) => SawExprIndex, @@ -174,80 +261,92 @@ fn saw_expr<'a>(node: &'a Expr_) -> SawExprComponent<'a> { ExprBreak(id) => SawExprBreak(id.map(|id| id.node.as_str())), ExprAgain(id) => SawExprAgain(id.map(|id| id.node.as_str())), ExprRet(..) => SawExprRet, - ExprInlineAsm(ref a,_,_) => SawExprInlineAsm(a), + ExprInlineAsm(ref a,..) 
=> SawExprInlineAsm(a), ExprStruct(..) => SawExprStruct, ExprRepeat(..) => SawExprRepeat, } } -/// SawStmtComponent is analogous to SawExprComponent, but for statements. -#[derive(Hash)] -pub enum SawStmtComponent { - SawStmtExpr, - SawStmtSemi, +#[derive(Clone, Copy, Hash, Eq, PartialEq)] +enum SawSpanExpnKind { + NoExpansion, + CommandLine, + SomeExpansion, } -impl<'a, 'tcx> Visitor<'a> for StrictVersionHashVisitor<'a, 'tcx> { +macro_rules! hash_attrs { + ($visitor:expr, $attrs:expr) => ({ + let attrs = $attrs; + if attrs.len() > 0 { + $visitor.hash_attributes(attrs); + } + }) +} + +macro_rules! hash_span { + ($visitor:expr, $span:expr) => ({ + if $visitor.hash_spans { + $visitor.hash_span($span); + } + }) +} + +impl<'a, 'hash, 'tcx> visit::Visitor<'tcx> for StrictVersionHashVisitor<'a, 'hash, 'tcx> { fn visit_nested_item(&mut self, _: ItemId) { // Each item is hashed independently; ignore nested items. } - fn visit_variant_data(&mut self, s: &'a VariantData, name: Name, - g: &'a Generics, _: NodeId, _: Span) { + fn visit_variant_data(&mut self, + s: &'tcx VariantData, + name: Name, + _: &'tcx Generics, + _: NodeId, + span: Span) { debug!("visit_variant_data: st={:?}", self.st); SawStructDef(name.as_str()).hash(self.st); - visit::walk_generics(self, g); - visit::walk_struct_def(self, s) + hash_span!(self, span); + visit::walk_struct_def(self, s); } - fn visit_variant(&mut self, v: &'a Variant, g: &'a Generics, item_id: NodeId) { + fn visit_variant(&mut self, + v: &'tcx Variant, + g: &'tcx Generics, + item_id: NodeId) { debug!("visit_variant: st={:?}", self.st); SawVariant.hash(self.st); - // walk_variant does not call walk_generics, so do it here. - visit::walk_generics(self, g); + hash_attrs!(self, &v.node.attrs); visit::walk_variant(self, v, g, item_id) } - // All of the remaining methods just record (in the hash - // SipHasher) that the visitor saw that particular variant - // (with its payload), and continue walking as the default - // visitor would. - // - // Some of the implementations have some notes as to how one - // might try to make their SVH computation less discerning - // (e.g. by incorporating reachability analysis). But - // currently all of their implementations are uniform and - // uninteresting. - // - // (If you edit a method such that it deviates from the - // pattern, please move that method up above this comment.) - - fn visit_name(&mut self, _: Span, name: Name) { + fn visit_name(&mut self, span: Span, name: Name) { debug!("visit_name: st={:?}", self.st); SawIdent(name.as_str()).hash(self.st); + hash_span!(self, span); } - fn visit_lifetime(&mut self, l: &'a Lifetime) { + fn visit_lifetime(&mut self, l: &'tcx Lifetime) { debug!("visit_lifetime: st={:?}", self.st); - SawLifetime(l.name.as_str()).hash(self.st); + SawLifetime.hash(self.st); + visit::walk_lifetime(self, l); } - fn visit_lifetime_def(&mut self, l: &'a LifetimeDef) { + fn visit_lifetime_def(&mut self, l: &'tcx LifetimeDef) { debug!("visit_lifetime_def: st={:?}", self.st); - SawLifetimeDef(l.lifetime.name.as_str()).hash(self.st); + SawLifetimeDef(l.bounds.len()).hash(self.st); + visit::walk_lifetime_def(self, l); } - // We do recursively walk the bodies of functions/methods - // (rather than omitting their bodies from the hash) since - // monomorphization and cross-crate inlining generally implies - // that a change to a crate body will require downstream - // crates to be recompiled. 
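`hash_span` above avoids hashing raw `BytePos` values, which shift whenever an unrelated file earlier in the `CodeMap` grows or shrinks; it resolves both ends of the span to `(file name, line, column)` first (and also folds in the expansion kind, which the sketch below omits). A reduced, self-contained model of that idea, with a plain struct standing in for `FileMap`:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Just enough file information to make positions stable across recompilations.
struct FileInfo<'a> {
    name: &'a str,
    start_pos: usize,        // absolute offset of the file within the codemap
    line_starts: Vec<usize>, // line beginnings, relative to the file
}

/// Resolve an absolute position to a position-independent (file, line, column)
/// triple and hash that, instead of the raw offset.
fn hash_pos(hasher: &mut DefaultHasher, file: &FileInfo, abs_pos: usize) {
    let rel = abs_pos - file.start_pos;
    let line = match file.line_starts.binary_search(&rel) {
        Ok(i) => i,
        Err(i) => i - 1,
    };
    let col = rel - file.line_starts[line];
    (file.name, line + 1, col).hash(hasher);
}

fn main() {
    // The same file, once at absolute offset 0 and once shifted because an
    // unrelated earlier file grew: the hashed values still agree.
    let before = FileInfo { name: "lib.rs", start_pos: 0,    line_starts: vec![0, 12, 30] };
    let after  = FileInfo { name: "lib.rs", start_pos: 4096, line_starts: vec![0, 12, 30] };

    let (mut h1, mut h2) = (DefaultHasher::new(), DefaultHasher::new());
    hash_pos(&mut h1, &before, 15);
    hash_pos(&mut h2, &after, 4096 + 15);
    assert_eq!(h1.finish(), h2.finish());
}
```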
- fn visit_expr(&mut self, ex: &'a Expr) { + fn visit_expr(&mut self, ex: &'tcx Expr) { debug!("visit_expr: st={:?}", self.st); - SawExpr(saw_expr(&ex.node)).hash(self.st); visit::walk_expr(self, ex) + SawExpr(saw_expr(&ex.node)).hash(self.st); + // No need to explicitly hash the discriminant here, since we are + // implicitly hashing the discriminant of SawExprComponent. + hash_span!(self, ex.span); + hash_attrs!(self, &ex.attrs); + visit::walk_expr(self, ex) } - fn visit_stmt(&mut self, s: &'a Stmt) { + fn visit_stmt(&mut self, s: &'tcx Stmt) { debug!("visit_stmt: st={:?}", self.st); // We don't want to modify the hash for decls, because @@ -258,97 +357,201 @@ impl<'a, 'tcx> Visitor<'a> for StrictVersionHashVisitor<'a, 'tcx> { // rules). match s.node { StmtDecl(..) => (), - StmtExpr(..) => SawStmt(SawStmtExpr).hash(self.st), - StmtSemi(..) => SawStmt(SawStmtSemi).hash(self.st), + StmtExpr(..) => { + SawStmt.hash(self.st); + self.hash_discriminant(&s.node); + hash_span!(self, s.span); + } + StmtSemi(..) => { + SawStmt.hash(self.st); + self.hash_discriminant(&s.node); + hash_span!(self, s.span); + } } visit::walk_stmt(self, s) } - fn visit_foreign_item(&mut self, i: &'a ForeignItem) { + fn visit_foreign_item(&mut self, i: &'tcx ForeignItem) { debug!("visit_foreign_item: st={:?}", self.st); - // FIXME (#14132) ideally we would incorporate privacy (or - // perhaps reachability) somewhere here, so foreign items - // that do not leak into downstream crates would not be - // part of the ABI. - SawForeignItem.hash(self.st); visit::walk_foreign_item(self, i) + SawForeignItem.hash(self.st); + hash_span!(self, i.span); + hash_attrs!(self, &i.attrs); + visit::walk_foreign_item(self, i) } - fn visit_item(&mut self, i: &'a Item) { + fn visit_item(&mut self, i: &'tcx Item) { debug!("visit_item: {:?} st={:?}", i, self.st); - // FIXME (#14132) ideally would incorporate reachability - // analysis somewhere here, so items that never leak into - // downstream crates (e.g. via monomorphisation or - // inlining) would not be part of the ABI. - SawItem.hash(self.st); visit::walk_item(self, i) + SawItem.hash(self.st); + // Hash the value of the discriminant of the Item variant. 
+ self.hash_discriminant(&i.node); + hash_span!(self, i.span); + hash_attrs!(self, &i.attrs); + visit::walk_item(self, i) } - fn visit_mod(&mut self, m: &'a Mod, _s: Span, n: NodeId) { + fn visit_mod(&mut self, m: &'tcx Mod, _s: Span, n: NodeId) { debug!("visit_mod: st={:?}", self.st); SawMod.hash(self.st); visit::walk_mod(self, m, n) } - fn visit_ty(&mut self, t: &'a Ty) { + fn visit_ty(&mut self, t: &'tcx Ty) { debug!("visit_ty: st={:?}", self.st); - SawTy.hash(self.st); visit::walk_ty(self, t) + SawTy.hash(self.st); + hash_span!(self, t.span); + visit::walk_ty(self, t) } - fn visit_generics(&mut self, g: &'a Generics) { + fn visit_generics(&mut self, g: &'tcx Generics) { debug!("visit_generics: st={:?}", self.st); - SawGenerics.hash(self.st); visit::walk_generics(self, g) + SawGenerics.hash(self.st); + visit::walk_generics(self, g) } - fn visit_fn(&mut self, fk: FnKind<'a>, fd: &'a FnDecl, - b: &'a Block, s: Span, n: NodeId) { - debug!("visit_fn: st={:?}", self.st); - SawFn.hash(self.st); visit::walk_fn(self, fk, fd, b, s, n) - } - - fn visit_trait_item(&mut self, ti: &'a TraitItem) { + fn visit_trait_item(&mut self, ti: &'tcx TraitItem) { debug!("visit_trait_item: st={:?}", self.st); - SawTraitItem.hash(self.st); visit::walk_trait_item(self, ti) + SawTraitItem.hash(self.st); + self.hash_discriminant(&ti.node); + hash_span!(self, ti.span); + hash_attrs!(self, &ti.attrs); + visit::walk_trait_item(self, ti) } - fn visit_impl_item(&mut self, ii: &'a ImplItem) { + fn visit_impl_item(&mut self, ii: &'tcx ImplItem) { debug!("visit_impl_item: st={:?}", self.st); - SawImplItem.hash(self.st); visit::walk_impl_item(self, ii) + SawImplItem.hash(self.st); + self.hash_discriminant(&ii.node); + hash_span!(self, ii.span); + hash_attrs!(self, &ii.attrs); + visit::walk_impl_item(self, ii) } - fn visit_struct_field(&mut self, s: &'a StructField) { + fn visit_struct_field(&mut self, s: &'tcx StructField) { debug!("visit_struct_field: st={:?}", self.st); - SawStructField.hash(self.st); visit::walk_struct_field(self, s) + SawStructField.hash(self.st); + hash_span!(self, s.span); + hash_attrs!(self, &s.attrs); + visit::walk_struct_field(self, s) } - fn visit_path(&mut self, path: &'a Path, _: ast::NodeId) { + fn visit_path(&mut self, path: &'tcx Path, _: ast::NodeId) { debug!("visit_path: st={:?}", self.st); - SawPath.hash(self.st); visit::walk_path(self, path) + SawPath(path.global).hash(self.st); + hash_span!(self, path.span); + visit::walk_path(self, path) } - fn visit_block(&mut self, b: &'a Block) { + fn visit_block(&mut self, b: &'tcx Block) { debug!("visit_block: st={:?}", self.st); - SawBlock.hash(self.st); visit::walk_block(self, b) + SawBlock.hash(self.st); + hash_span!(self, b.span); + visit::walk_block(self, b) } - fn visit_pat(&mut self, p: &'a Pat) { + fn visit_pat(&mut self, p: &'tcx Pat) { debug!("visit_pat: st={:?}", self.st); - SawPat.hash(self.st); visit::walk_pat(self, p) + SawPat.hash(self.st); + self.hash_discriminant(&p.node); + hash_span!(self, p.span); + visit::walk_pat(self, p) } - fn visit_local(&mut self, l: &'a Local) { + fn visit_local(&mut self, l: &'tcx Local) { debug!("visit_local: st={:?}", self.st); - SawLocal.hash(self.st); visit::walk_local(self, l) + SawLocal.hash(self.st); + hash_attrs!(self, &l.attrs); + visit::walk_local(self, l) + // No need to hash span, we are hashing all component spans } - fn visit_arm(&mut self, a: &'a Arm) { + fn visit_arm(&mut self, a: &'tcx Arm) { debug!("visit_arm: st={:?}", self.st); - SawArm.hash(self.st); visit::walk_arm(self, a) + 
SawArm.hash(self.st); + hash_attrs!(self, &a.attrs); + visit::walk_arm(self, a) } fn visit_id(&mut self, id: NodeId) { debug!("visit_id: id={} st={:?}", id, self.st); - self.hash_resolve(id); + self.hash_resolve(id) + } + + fn visit_vis(&mut self, v: &'tcx Visibility) { + debug!("visit_vis: st={:?}", self.st); + SawVis.hash(self.st); + self.hash_discriminant(v); + visit::walk_vis(self, v) + } + + fn visit_where_predicate(&mut self, predicate: &'tcx WherePredicate) { + debug!("visit_where_predicate: st={:?}", self.st); + SawWherePredicate.hash(self.st); + self.hash_discriminant(predicate); + // Ignoring span. Any important nested components should be visited. + visit::walk_where_predicate(self, predicate) + } + + fn visit_ty_param_bound(&mut self, bounds: &'tcx TyParamBound) { + debug!("visit_ty_param_bound: st={:?}", self.st); + SawTyParamBound.hash(self.st); + self.hash_discriminant(bounds); + // The TraitBoundModifier in TraitTyParamBound will be hash in + // visit_poly_trait_ref() + visit::walk_ty_param_bound(self, bounds) + } + + fn visit_poly_trait_ref(&mut self, t: &'tcx PolyTraitRef, m: &'tcx TraitBoundModifier) { + debug!("visit_poly_trait_ref: st={:?}", self.st); + SawPolyTraitRef.hash(self.st); + m.hash(self.st); + visit::walk_poly_trait_ref(self, t, m) + } + + fn visit_path_list_item(&mut self, prefix: &'tcx Path, item: &'tcx PathListItem) { + debug!("visit_path_list_item: st={:?}", self.st); + SawPathListItem.hash(self.st); + self.hash_discriminant(&item.node); + hash_span!(self, item.span); + visit::walk_path_list_item(self, prefix, item) + } + + fn visit_path_segment(&mut self, path_span: Span, path_segment: &'tcx PathSegment) { + debug!("visit_path_segment: st={:?}", self.st); + SawPathSegment.hash(self.st); + visit::walk_path_segment(self, path_span, path_segment) + } + + fn visit_path_parameters(&mut self, path_span: Span, path_parameters: &'tcx PathParameters) { + debug!("visit_path_parameters: st={:?}", self.st); + SawPathParameters.hash(self.st); + self.hash_discriminant(path_parameters); + visit::walk_path_parameters(self, path_span, path_parameters) + } + + fn visit_assoc_type_binding(&mut self, type_binding: &'tcx TypeBinding) { + debug!("visit_assoc_type_binding: st={:?}", self.st); + SawAssocTypeBinding.hash(self.st); + hash_span!(self, type_binding.span); + visit::walk_assoc_type_binding(self, type_binding) + } + + fn visit_attribute(&mut self, _: &ast::Attribute) { + // We explicitly do not use this method, since doing that would + // implicitly impose an order on the attributes being hashed, while we + // explicitly don't want their order to matter + } + + fn visit_macro_def(&mut self, macro_def: &'tcx MacroDef) { + debug!("visit_macro_def: st={:?}", self.st); + if macro_def.export { + SawMacroDef.hash(self.st); + hash_attrs!(self, ¯o_def.attrs); + visit::walk_macro_def(self, macro_def) + // FIXME(mw): We should hash the body of the macro too but we don't + // have a stable way of doing so yet. + } } } @@ -361,7 +564,7 @@ pub enum DefHash { SawErr, } -impl<'a, 'tcx> StrictVersionHashVisitor<'a, 'tcx> { +impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> { fn hash_resolve(&mut self, id: ast::NodeId) { // Because whether or not a given id has an entry is dependent // solely on expr variant etc, we don't need to hash whether @@ -369,20 +572,29 @@ impl<'a, 'tcx> StrictVersionHashVisitor<'a, 'tcx> { // variant it is above when we visit the HIR). 
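Many of the `visit_*` methods above call `hash_discriminant`, which mixes only the enum variant of a node (not its payload) into the hash via the unstable `discriminant_value` intrinsic. On stable Rust the same effect is available through `std::mem::discriminant`, whose result implements `Hash`; a sketch under that assumption, with a made-up `StmtKind` enum:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::mem;

// Stand-in for an HIR node kind whose *variant* should influence the hash
// even when the payload is hashed separately (or not at all).
enum StmtKind {
    Expr(String),
    Semi(String),
}

/// Hash only which variant `v` is, not its contents.
fn hash_discriminant<T>(hasher: &mut DefaultHasher, v: &T) {
    mem::discriminant(v).hash(hasher);
}

fn main() {
    let hash_of = |v: &StmtKind| {
        let mut h = DefaultHasher::new();
        hash_discriminant(&mut h, v);
        h.finish()
    };

    let a = StmtKind::Semi("foo();".to_string());
    let b = StmtKind::Semi("bar();".to_string());
    let c = StmtKind::Expr("foo()".to_string());

    assert_eq!(hash_of(&a), hash_of(&b)); // same variant, same contribution
    assert_ne!(hash_of(&a), hash_of(&c)); // different variant, different contribution
}
```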
if let Some(def) = self.tcx.def_map.borrow().get(&id) { + debug!("hash_resolve: id={:?} def={:?} st={:?}", id, def, self.st); self.hash_partial_def(def); } if let Some(traits) = self.tcx.trait_map.get(&id) { + debug!("hash_resolve: id={:?} traits={:?} st={:?}", id, traits, self.st); traits.len().hash(self.st); - for candidate in traits { - self.hash_def_id(candidate.def_id); - } + + // The ordering of the candidates is not fixed. So we hash + // the def-ids and then sort them and hash the collection. + let mut candidates: Vec<_> = + traits.iter() + .map(|&TraitCandidate { def_id, import_id: _ }| { + self.compute_def_id_hash(def_id) + }) + .collect(); + candidates.sort(); + candidates.hash(self.st); } } fn hash_def_id(&mut self, def_id: DefId) { - let def_path = self.tcx.def_path(def_id); - self.hash_def_path(&def_path); + self.compute_def_id_hash(def_id).hash(self.st); } fn hash_partial_def(&mut self, def: &PathResolution) { @@ -397,7 +609,6 @@ impl<'a, 'tcx> StrictVersionHashVisitor<'a, 'tcx> { // def-id is the same, so it suffices to hash the def-id Def::Fn(..) | Def::Mod(..) | - Def::ForeignMod(..) | Def::Static(..) | Def::Variant(..) | Def::Enum(..) | @@ -405,6 +616,7 @@ impl<'a, 'tcx> StrictVersionHashVisitor<'a, 'tcx> { Def::AssociatedTy(..) | Def::TyParam(..) | Def::Struct(..) | + Def::Union(..) | Def::Trait(..) | Def::Method(..) | Def::Const(..) | @@ -436,4 +648,68 @@ impl<'a, 'tcx> StrictVersionHashVisitor<'a, 'tcx> { } } } + + fn hash_meta_item(&mut self, meta_item: &ast::MetaItem) { + debug!("hash_meta_item: st={:?}", self.st); + + // ignoring span information, it doesn't matter here + self.hash_discriminant(&meta_item.node); + match meta_item.node { + ast::MetaItemKind::Word(ref s) => { + s.len().hash(self.st); + s.hash(self.st); + } + ast::MetaItemKind::NameValue(ref s, ref lit) => { + s.len().hash(self.st); + s.hash(self.st); + lit.node.hash(self.st); + } + ast::MetaItemKind::List(ref s, ref items) => { + s.len().hash(self.st); + s.hash(self.st); + // Sort subitems so the hash does not depend on their order + let indices = self.indices_sorted_by(&items, |p| { + (p.name(), fnv::hash(&p.literal().map(|i| &i.node))) + }); + items.len().hash(self.st); + for (index, &item_index) in indices.iter().enumerate() { + index.hash(self.st); + let nested_meta_item: &ast::NestedMetaItemKind = &items[item_index].node; + self.hash_discriminant(nested_meta_item); + match *nested_meta_item { + ast::NestedMetaItemKind::MetaItem(ref meta_item) => { + self.hash_meta_item(meta_item); + } + ast::NestedMetaItemKind::Literal(ref lit) => { + lit.node.hash(self.st); + } + } + } + } + } + } + + pub fn hash_attributes(&mut self, attributes: &[ast::Attribute]) { + debug!("hash_attributes: st={:?}", self.st); + let indices = self.indices_sorted_by(attributes, |attr| attr.name()); + + for i in indices { + let attr = &attributes[i].node; + if !attr.is_sugared_doc && + !IGNORED_ATTRIBUTES.contains(&&*attr.value.name()) { + SawAttribute(attr.style).hash(self.st); + self.hash_meta_item(&*attr.value); + } + } + } + + fn indices_sorted_by(&mut self, items: &[T], get_key: F) -> Vec + where K: Ord, + F: Fn(&T) -> K + { + let mut indices = Vec::with_capacity(items.len()); + indices.extend(0 .. 
items.len()); + indices.sort_by_key(|index| get_key(&items[*index])); + indices + } } diff --git a/src/librustc_incremental/lib.rs b/src/librustc_incremental/lib.rs index 0d11b0794f..67104e912f 100644 --- a/src/librustc_incremental/lib.rs +++ b/src/librustc_incremental/lib.rs @@ -19,12 +19,14 @@ html_root_url = "https://doc.rust-lang.org/nightly/")] #![cfg_attr(not(stage0), deny(warnings))] -#![feature(question_mark)] +#![feature(dotdot_in_tuple_patterns)] +#![cfg_attr(stage0, feature(question_mark))] #![feature(rustc_private)] #![feature(staged_api)] +#![feature(rand)] +#![feature(core_intrinsics)] extern crate graphviz; -extern crate rbml; #[macro_use] extern crate rustc; extern crate rustc_data_structures; extern crate serialize as rustc_serialize; @@ -33,14 +35,23 @@ extern crate serialize as rustc_serialize; #[macro_use] extern crate syntax; extern crate syntax_pos; +const ATTR_DIRTY: &'static str = "rustc_dirty"; +const ATTR_CLEAN: &'static str = "rustc_clean"; +const ATTR_DIRTY_METADATA: &'static str = "rustc_metadata_dirty"; +const ATTR_CLEAN_METADATA: &'static str = "rustc_metadata_clean"; +const ATTR_IF_THIS_CHANGED: &'static str = "rustc_if_this_changed"; +const ATTR_THEN_THIS_WOULD_NEED: &'static str = "rustc_then_this_would_need"; + mod assert_dep_graph; mod calculate_svh; mod persist; pub use assert_dep_graph::assert_dep_graph; -pub use calculate_svh::SvhCalculate; +pub use calculate_svh::compute_incremental_hashes_map; +pub use calculate_svh::IncrementalHashesMap; pub use persist::load_dep_graph; pub use persist::save_dep_graph; pub use persist::save_trans_partition; pub use persist::save_work_products; pub use persist::in_incr_comp_dir; +pub use persist::finalize_session_directory; diff --git a/src/librustc_incremental/persist/data.rs b/src/librustc_incremental/persist/data.rs index 12f3ed8ae2..57e7a0bc21 100644 --- a/src/librustc_incremental/persist/data.rs +++ b/src/librustc_incremental/persist/data.rs @@ -13,6 +13,7 @@ use rustc::dep_graph::{DepNode, WorkProduct, WorkProductId}; use rustc::hir::def_id::DefIndex; use std::sync::Arc; +use rustc_data_structures::fnv::FnvHashMap; use super::directory::DefPathIndex; @@ -93,6 +94,18 @@ pub struct SerializedMetadataHashes { /// a `DefPathIndex` that gets retracted to the current `DefId` /// (matching the one found in this structure). pub hashes: Vec, + + /// For each DefIndex (as it occurs in SerializedMetadataHash), this + /// map stores the DefPathIndex (as it occurs in DefIdDirectory), so + /// that we can find the new DefId for a SerializedMetadataHash in a + /// subsequent compilation session. + /// + /// This map is only needed for running auto-tests using the + /// #[rustc_metadata_dirty] and #[rustc_metadata_clean] attributes, and + /// is only populated if -Z query-dep-graph is specified. It will be + /// empty otherwise. Importing crates are perfectly happy with just having + /// the DefIndex. 
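`hash_attributes` above makes attribute order irrelevant by sorting indices by attribute name before hashing (via `indices_sorted_by`), and it skips the attributes that must never influence the hash, such as the `rustc_dirty`/`rustc_clean` test markers. The same pattern in miniature, with attributes reduced to illustrative `(name, value)` pairs and a shortened ignore list:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

const IGNORED_ATTRIBUTES: &[&str] = &["cfg", "rustc_dirty", "rustc_clean"];

/// Hash attributes in a canonical (sorted-by-name) order, skipping ignored ones.
fn hash_attributes(hasher: &mut DefaultHasher, attrs: &[(&str, &str)]) {
    // Sort indices rather than the attributes themselves, like `indices_sorted_by`.
    let mut indices: Vec<usize> = (0..attrs.len()).collect();
    indices.sort_by_key(|&i| attrs[i].0);

    for i in indices {
        let (name, value) = attrs[i];
        if !IGNORED_ATTRIBUTES.contains(&name) {
            (name, value).hash(hasher);
        }
    }
}

fn main() {
    let hash_of = |attrs: &[(&str, &str)]| {
        let mut h = DefaultHasher::new();
        hash_attributes(&mut h, attrs);
        h.finish()
    };

    // Reordering attributes, or adding an ignored one, leaves the hash unchanged.
    let a = hash_of(&[("inline", "always"), ("doc", "hidden")]);
    let b = hash_of(&[("doc", "hidden"), ("inline", "always"), ("rustc_dirty", "")]);
    assert_eq!(a, b);
}
```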
+ pub index_map: FnvHashMap } /// The hash for some metadata that (when saving) will be exported diff --git a/src/librustc_incremental/persist/directory.rs b/src/librustc_incremental/persist/directory.rs index 89a79d1a48..619e237ee3 100644 --- a/src/librustc_incremental/persist/directory.rs +++ b/src/librustc_incremental/persist/directory.rs @@ -15,13 +15,11 @@ use rustc::dep_graph::DepNode; use rustc::hir::map::DefPath; -use rustc::hir::def_id::DefId; -use rustc::middle::cstore::LOCAL_CRATE; +use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; use rustc::ty::TyCtxt; use rustc::util::nodemap::DefIdMap; use std::fmt::{self, Debug}; use std::iter::once; -use syntax::ast; /// Index into the DefIdDirectory #[derive(Copy, Clone, Debug, PartialOrd, Ord, Hash, PartialEq, Eq, @@ -43,7 +41,7 @@ pub struct DefIdDirectory { #[derive(Debug, RustcEncodable, RustcDecodable)] pub struct CrateInfo { - krate: ast::CrateNum, + krate: CrateNum, name: String, disambiguator: String, } @@ -53,7 +51,7 @@ impl DefIdDirectory { DefIdDirectory { paths: vec![], krates: krates } } - fn max_current_crate(&self, tcx: TyCtxt) -> ast::CrateNum { + fn max_current_crate(&self, tcx: TyCtxt) -> CrateNum { tcx.sess.cstore.crates() .into_iter() .max() @@ -72,8 +70,8 @@ impl DefIdDirectory { pub fn krate_still_valid(&self, tcx: TyCtxt, - max_current_crate: ast::CrateNum, - krate: ast::CrateNum) -> bool { + max_current_crate: CrateNum, + krate: CrateNum) -> bool { // Check that the crate-number still matches. For now, if it // doesn't, just return None. We could do better, such as // finding the new number. @@ -81,7 +79,7 @@ impl DefIdDirectory { if krate > max_current_crate { false } else { - let old_info = &self.krates[krate as usize]; + let old_info = &self.krates[krate.as_usize()]; assert_eq!(old_info.krate, krate); let old_name: &str = &old_info.name; let old_disambiguator: &str = &old_info.disambiguator; @@ -101,7 +99,7 @@ impl DefIdDirectory { } else { debug!("crate {} changed from {:?} to {:?}/{:?}", path.krate, - self.krates[path.krate as usize], + self.krates[path.krate.as_usize()], tcx.crate_name(path.krate), tcx.crate_disambiguator(path.krate)); None @@ -180,7 +178,6 @@ impl<'a,'tcx> DefIdDirectoryBuilder<'a,'tcx> { &self.directory.paths[id.index as usize] } - pub fn map(&mut self, node: &DepNode) -> DepNode { node.map_def(|&def_id| Some(self.add(def_id))).unwrap() } diff --git a/src/librustc_incremental/persist/dirty_clean.rs b/src/librustc_incremental/persist/dirty_clean.rs index 3c77cc07d3..95452021d8 100644 --- a/src/librustc_incremental/persist/dirty_clean.rs +++ b/src/librustc_incremental/persist/dirty_clean.rs @@ -9,10 +9,10 @@ // except according to those terms. //! Debugging code to test the state of the dependency graph just -//! after it is loaded from disk. For each node marked with -//! `#[rustc_clean]` or `#[rustc_dirty]`, we will check that a -//! suitable node for that item either appears or does not appear in -//! the dep-graph, as appropriate: +//! after it is loaded from disk and just after it has been saved. +//! For each node marked with `#[rustc_clean]` or `#[rustc_dirty]`, +//! we will check that a suitable node for that item either appears +//! or does not appear in the dep-graph, as appropriate: //! //! - `#[rustc_dirty(label="TypeckItemBody", cfg="rev2")]` if we are //! in `#[cfg(rev2)]`, then there MUST NOT be a node @@ -23,6 +23,22 @@ //! //! Errors are reported if we are in the suitable configuration but //! the required condition is not met. +//! +//! 
The `#[rustc_metadata_dirty]` and `#[rustc_metadata_clean]` attributes +//! can be used to check the incremental compilation hash (ICH) values of +//! metadata exported in rlibs. +//! +//! - If a node is marked with `#[rustc_metadata_clean(cfg="rev2")]` we +//! check that the metadata hash for that node is the same for "rev2" +//! it was for "rev1". +//! - If a node is marked with `#[rustc_metadata_dirty(cfg="rev2")]` we +//! check that the metadata hash for that node is *different* for "rev2" +//! than it was for "rev1". +//! +//! Note that the metadata-testing attributes must never specify the +//! first revision. This would lead to a crash since there is no +//! previous revision to compare things to. +//! use super::directory::RetracedDefIdDirectory; use super::load::DirtyNodes; @@ -30,14 +46,14 @@ use rustc::dep_graph::{DepGraphQuery, DepNode}; use rustc::hir; use rustc::hir::def_id::DefId; use rustc::hir::intravisit::Visitor; -use rustc_data_structures::fnv::FnvHashSet; -use syntax::ast::{self, Attribute, MetaItem}; -use syntax::attr::AttrMetaMethods; +use syntax::ast::{self, Attribute, NestedMetaItem}; +use rustc_data_structures::fnv::{FnvHashSet, FnvHashMap}; use syntax::parse::token::InternedString; +use syntax_pos::Span; use rustc::ty::TyCtxt; -const DIRTY: &'static str = "rustc_dirty"; -const CLEAN: &'static str = "rustc_clean"; +use {ATTR_DIRTY, ATTR_CLEAN, ATTR_DIRTY_METADATA, ATTR_CLEAN_METADATA}; + const LABEL: &'static str = "label"; const CFG: &'static str = "cfg"; @@ -71,46 +87,11 @@ pub struct DirtyCleanVisitor<'a, 'tcx:'a> { } impl<'a, 'tcx> DirtyCleanVisitor<'a, 'tcx> { - fn expect_associated_value(&self, item: &MetaItem) -> InternedString { - if let Some(value) = item.value_str() { - value - } else { - self.tcx.sess.span_fatal( - item.span, - &format!("associated value expected for `{}`", item.name())); - } - } - - /// Given a `#[rustc_dirty]` or `#[rustc_clean]` attribute, scan - /// for a `cfg="foo"` attribute and check whether we have a cfg - /// flag called `foo`. - fn check_config(&self, attr: &ast::Attribute) -> bool { - debug!("check_config(attr={:?})", attr); - let config = &self.tcx.map.krate().config; - debug!("check_config: config={:?}", config); - for item in attr.meta_item_list().unwrap_or(&[]) { - if item.check_name(CFG) { - let value = self.expect_associated_value(item); - debug!("check_config: searching for cfg {:?}", value); - for cfg in &config[..] { - if cfg.check_name(&value[..]) { - debug!("check_config: matched {:?}", cfg); - return true; - } - } - return false; - } - } - - self.tcx.sess.span_fatal( - attr.span, - &format!("no cfg attribute")); - } fn dep_node(&self, attr: &Attribute, def_id: DefId) -> DepNode { for item in attr.meta_item_list().unwrap_or(&[]) { if item.check_name(LABEL) { - let value = self.expect_associated_value(item); + let value = expect_associated_value(self.tcx, item); match DepNode::from_label_string(&value[..], def_id) { Ok(def_id) => return def_id, Err(()) => { @@ -133,6 +114,7 @@ impl<'a, 'tcx> DirtyCleanVisitor<'a, 'tcx> { debug!("assert_dirty({:?})", dep_node); match dep_node { + DepNode::Krate | DepNode::Hir(_) => { // HIR nodes are inputs, so if we are asserting that the HIR node is // dirty, we check the dirty input set. @@ -161,6 +143,7 @@ impl<'a, 'tcx> DirtyCleanVisitor<'a, 'tcx> { debug!("assert_clean({:?})", dep_node); match dep_node { + DepNode::Krate | DepNode::Hir(_) => { // For HIR nodes, check the inputs. 
if self.dirty_inputs.contains(&dep_node) { @@ -189,12 +172,12 @@ impl<'a, 'tcx> Visitor<'tcx> for DirtyCleanVisitor<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx hir::Item) { let def_id = self.tcx.map.local_def_id(item.id); for attr in self.tcx.get_attrs(def_id).iter() { - if attr.check_name(DIRTY) { - if self.check_config(attr) { + if attr.check_name(ATTR_DIRTY) { + if check_config(self.tcx, attr) { self.assert_dirty(item, self.dep_node(attr, def_id)); } - } else if attr.check_name(CLEAN) { - if self.check_config(attr) { + } else if attr.check_name(ATTR_CLEAN) { + if check_config(self.tcx, attr) { self.assert_clean(item, self.dep_node(attr, def_id)); } } @@ -202,3 +185,115 @@ impl<'a, 'tcx> Visitor<'tcx> for DirtyCleanVisitor<'a, 'tcx> { } } +pub fn check_dirty_clean_metadata<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + prev_metadata_hashes: &FnvHashMap, + current_metadata_hashes: &FnvHashMap) { + if !tcx.sess.opts.debugging_opts.query_dep_graph { + return; + } + + tcx.dep_graph.with_ignore(||{ + let krate = tcx.map.krate(); + krate.visit_all_items(&mut DirtyCleanMetadataVisitor { + tcx: tcx, + prev_metadata_hashes: prev_metadata_hashes, + current_metadata_hashes: current_metadata_hashes, + }); + }); +} + +pub struct DirtyCleanMetadataVisitor<'a, 'tcx:'a, 'm> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + prev_metadata_hashes: &'m FnvHashMap, + current_metadata_hashes: &'m FnvHashMap, +} + +impl<'a, 'tcx, 'm> Visitor<'tcx> for DirtyCleanMetadataVisitor<'a, 'tcx, 'm> { + fn visit_item(&mut self, item: &'tcx hir::Item) { + let def_id = self.tcx.map.local_def_id(item.id); + + for attr in self.tcx.get_attrs(def_id).iter() { + if attr.check_name(ATTR_DIRTY_METADATA) { + if check_config(self.tcx, attr) { + self.assert_state(false, def_id, item.span); + } + } else if attr.check_name(ATTR_CLEAN_METADATA) { + if check_config(self.tcx, attr) { + self.assert_state(true, def_id, item.span); + } + } + } + } +} + +impl<'a, 'tcx, 'm> DirtyCleanMetadataVisitor<'a, 'tcx, 'm> { + + fn assert_state(&self, should_be_clean: bool, def_id: DefId, span: Span) { + let item_path = self.tcx.item_path_str(def_id); + debug!("assert_state({})", item_path); + + if let Some(&prev_hash) = self.prev_metadata_hashes.get(&def_id) { + let hashes_are_equal = prev_hash == self.current_metadata_hashes[&def_id]; + + if should_be_clean && !hashes_are_equal { + self.tcx.sess.span_err( + span, + &format!("Metadata hash of `{}` is dirty, but should be clean", + item_path)); + } + + let should_be_dirty = !should_be_clean; + if should_be_dirty && hashes_are_equal { + self.tcx.sess.span_err( + span, + &format!("Metadata hash of `{}` is clean, but should be dirty", + item_path)); + } + } else { + self.tcx.sess.span_err( + span, + &format!("Could not find previous metadata hash of `{}`", + item_path)); + } + } +} + +/// Given a `#[rustc_dirty]` or `#[rustc_clean]` attribute, scan +/// for a `cfg="foo"` attribute and check whether we have a cfg +/// flag called `foo`. +fn check_config(tcx: TyCtxt, attr: &ast::Attribute) -> bool { + debug!("check_config(attr={:?})", attr); + let config = &tcx.map.krate().config; + debug!("check_config: config={:?}", config); + for item in attr.meta_item_list().unwrap_or(&[]) { + if item.check_name(CFG) { + let value = expect_associated_value(tcx, item); + debug!("check_config: searching for cfg {:?}", value); + for cfg in &config[..] 
{ + if cfg.check_name(&value[..]) { + debug!("check_config: matched {:?}", cfg); + return true; + } + } + return false; + } + } + + tcx.sess.span_fatal( + attr.span, + &format!("no cfg attribute")); +} + +fn expect_associated_value(tcx: TyCtxt, item: &NestedMetaItem) -> InternedString { + if let Some(value) = item.value_str() { + value + } else { + let msg = if let Some(name) = item.name() { + format!("associated value expected for `{}`", name) + } else { + "expected an associated value".to_string() + }; + + tcx.sess.span_fatal(item.span, &msg); + } +} diff --git a/src/librustc_incremental/persist/file_format.rs b/src/librustc_incremental/persist/file_format.rs new file mode 100644 index 0000000000..7c2b69e762 --- /dev/null +++ b/src/librustc_incremental/persist/file_format.rs @@ -0,0 +1,122 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This module defines a generic file format that allows to check if a given +//! file generated by incremental compilation was generated by a compatible +//! compiler version. This file format is used for the on-disk version of the +//! dependency graph and the exported metadata hashes. +//! +//! In practice "compatible compiler version" means "exactly the same compiler +//! version", since the header encodes the git commit hash of the compiler. +//! Since we can always just ignore the incremental compilation cache and +//! compiler versions don't change frequently for the typical user, being +//! conservative here practically has no downside. + +use std::io::{self, Read}; +use std::path::Path; +use std::fs::File; +use std::env; + +use rustc::session::config::nightly_options; + +/// The first few bytes of files generated by incremental compilation +const FILE_MAGIC: &'static [u8] = b"RSIC"; + +/// Change this if the header format changes +const HEADER_FORMAT_VERSION: u16 = 0; + +/// A version string that hopefully is always different for compiler versions +/// with different encodings of incremental compilation artifacts. Contains +/// the git commit hash. +const RUSTC_VERSION: Option<&'static str> = option_env!("CFG_VERSION"); + +pub fn write_file_header(stream: &mut W) -> io::Result<()> { + stream.write_all(FILE_MAGIC)?; + stream.write_all(&[(HEADER_FORMAT_VERSION >> 0) as u8, + (HEADER_FORMAT_VERSION >> 8) as u8])?; + + let rustc_version = rustc_version(); + assert_eq!(rustc_version.len(), (rustc_version.len() as u8) as usize); + stream.write_all(&[rustc_version.len() as u8])?; + stream.write_all(rustc_version.as_bytes())?; + + Ok(()) +} + +/// Reads the contents of a file with a file header as defined in this module. +/// +/// - Returns `Ok(Some(data))` if the file existed and was generated by a +/// compatible compiler version. `data` is the entire contents of the file +/// *after* the header. +/// - Returns `Ok(None)` if the file did not exist or was generated by an +/// incompatible version of the compiler. +/// - Returns `Err(..)` if some kind of IO error occurred while reading the +/// file. 
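The header that `write_file_header` emits above is tiny: a four-byte magic, a little-endian `u16` format version, and a length-prefixed compiler version string; a reader that finds anything unexpected simply pretends the file is absent. A stand-alone sketch of that write/validate round trip over an in-memory buffer (the constants and version strings below are illustrative, not rustc's):

```rust
use std::io::{self, Cursor, Read, Write};

const FILE_MAGIC: &[u8] = b"RSIC";
const HEADER_FORMAT_VERSION: u16 = 0;

fn write_file_header(stream: &mut impl Write, version: &str) -> io::Result<()> {
    stream.write_all(FILE_MAGIC)?;
    stream.write_all(&HEADER_FORMAT_VERSION.to_le_bytes())?;
    assert!(version.len() <= u8::MAX as usize);
    stream.write_all(&[version.len() as u8])?;
    stream.write_all(version.as_bytes())
}

/// Returns the payload after the header, or `None` if the header does not
/// match this compiler (wrong magic, format version, or version string).
fn read_file(data: &[u8], version: &str) -> io::Result<Option<Vec<u8>>> {
    let mut file = Cursor::new(data);

    let mut magic = [0u8; 4];
    file.read_exact(&mut magic)?;
    if &magic[..] != FILE_MAGIC {
        return Ok(None);
    }

    let mut fmt = [0u8; 2];
    file.read_exact(&mut fmt)?;
    if u16::from_le_bytes(fmt) != HEADER_FORMAT_VERSION {
        return Ok(None);
    }

    let mut len = [0u8; 1];
    file.read_exact(&mut len)?;
    let mut ver = vec![0u8; len[0] as usize];
    file.read_exact(&mut ver)?;
    if ver != version.as_bytes() {
        return Ok(None);
    }

    let mut payload = Vec::new();
    file.read_to_end(&mut payload)?;
    Ok(Some(payload))
}

fn main() -> io::Result<()> {
    let mut buf = Vec::new();
    write_file_header(&mut buf, "1.13.0 (abc123)")?;
    buf.extend_from_slice(b"dep-graph payload");

    assert_eq!(read_file(&buf, "1.13.0 (abc123)")?.as_deref(),
               Some(&b"dep-graph payload"[..]));
    assert_eq!(read_file(&buf, "a different compiler")?, None);
    Ok(())
}
```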
+pub fn read_file(path: &Path) -> io::Result>> { + if !path.exists() { + return Ok(None); + } + + let mut file = File::open(path)?; + + // Check FILE_MAGIC + { + debug_assert!(FILE_MAGIC.len() == 4); + let mut file_magic = [0u8; 4]; + file.read_exact(&mut file_magic)?; + if file_magic != FILE_MAGIC { + return Ok(None) + } + } + + // Check HEADER_FORMAT_VERSION + { + debug_assert!(::std::mem::size_of_val(&HEADER_FORMAT_VERSION) == 2); + let mut header_format_version = [0u8; 2]; + file.read_exact(&mut header_format_version)?; + let header_format_version = (header_format_version[0] as u16) | + ((header_format_version[1] as u16) << 8); + + if header_format_version != HEADER_FORMAT_VERSION { + return Ok(None) + } + } + + // Check RUSTC_VERSION + { + let mut rustc_version_str_len = [0u8; 1]; + file.read_exact(&mut rustc_version_str_len)?; + let rustc_version_str_len = rustc_version_str_len[0] as usize; + let mut buffer = Vec::with_capacity(rustc_version_str_len); + buffer.resize(rustc_version_str_len, 0); + file.read_exact(&mut buffer[..])?; + + if &buffer[..] != rustc_version().as_bytes() { + return Ok(None); + } + } + + let mut data = vec![]; + file.read_to_end(&mut data)?; + + Ok(Some(data)) +} + +fn rustc_version() -> String { + if nightly_options::is_nightly_build() { + if let Some(val) = env::var_os("RUSTC_FORCE_INCR_COMP_ARTIFACT_HEADER") { + return val.to_string_lossy().into_owned() + } + } + + RUSTC_VERSION.expect("Cannot use rustc without explicit version for \ + incremental compilation") + .to_string() +} diff --git a/src/librustc_incremental/persist/fs.rs b/src/librustc_incremental/persist/fs.rs new file mode 100644 index 0000000000..428283309b --- /dev/null +++ b/src/librustc_incremental/persist/fs.rs @@ -0,0 +1,1058 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +//! This module manages how the incremental compilation cache is represented in +//! the file system. +//! +//! Incremental compilation caches are managed according to a copy-on-write +//! strategy: Once a complete, consistent cache version is finalized, it is +//! never modified. Instead, when a subsequent compilation session is started, +//! the compiler will allocate a new version of the cache that starts out as +//! a copy of the previous version. Then only this new copy is modified and it +//! will not be visible to other processes until it is finalized. This ensures +//! that multiple compiler processes can be executed concurrently for the same +//! crate without interfering with each other or blocking each other. +//! +//! More concretely this is implemented via the following protocol: +//! +//! 1. For a newly started compilation session, the compiler allocates a +//! new `session` directory within the incremental compilation directory. +//! This session directory will have a unique name that ends with the suffix +//! "-working" and that contains a creation timestamp. +//! 2. Next, the compiler looks for the newest finalized session directory, +//! that is, a session directory from a previous compilation session that +//! has been marked as valid and consistent. A session directory is +//! considered finalized if the "-working" suffix in the directory name has +//! been replaced by the SVH of the crate. 
+//! 3. Once the compiler has found a valid, finalized session directory, it will +//! hard-link/copy its contents into the new "-working" directory. If all +//! goes well, it will have its own, private copy of the source directory and +//! subsequently not have to worry about synchronizing with other compiler +//! processes. +//! 4. Now the compiler can do its normal compilation process, which involves +//! reading and updating its private session directory. +//! 5. When compilation finishes without errors, the private session directory +//! will be in a state where it can be used as input for other compilation +//! sessions. That is, it will contain a dependency graph and cache artifacts +//! that are consistent with the state of the source code it was compiled +//! from, with no need to change them ever again. At this point, the compiler +//! finalizes and "publishes" its private session directory by renaming it +//! from "s-{timestamp}-{random}-working" to "s-{timestamp}-{SVH}". +//! 6. At this point the "old" session directory that we copied our data from +//! at the beginning of the session has become obsolete because we have just +//! published a more current version. Thus the compiler will delete it. +//! +//! ## Garbage Collection +//! +//! Naively following the above protocol might lead to old session directories +//! piling up if a compiler instance crashes for some reason before its able to +//! remove its private session directory. In order to avoid wasting disk space, +//! the compiler also does some garbage collection each time it is started in +//! incremental compilation mode. Specifically, it will scan the incremental +//! compilation directory for private session directories that are not in use +//! any more and will delete those. It will also delete any finalized session +//! directories for a given crate except for the most recent one. +//! +//! ## Synchronization +//! +//! There is some synchronization needed in order for the compiler to be able to +//! determine whether a given private session directory is not in used any more. +//! This is done by creating a lock file for each session directory and +//! locking it while the directory is still being used. Since file locks have +//! operating system support, we can rely on the lock being released if the +//! compiler process dies for some unexpected reason. Thus, when garbage +//! collecting private session directories, the collecting process can determine +//! whether the directory is still in use by trying to acquire a lock on the +//! file. If locking the file fails, the original process must still be alive. +//! If locking the file succeeds, we know that the owning process is not alive +//! any more and we can safely delete the directory. +//! There is still a small time window between the original process creating the +//! lock file and actually locking it. In order to minimize the chance that +//! another process tries to acquire the lock in just that instance, only +//! session directories that are older than a few seconds are considered for +//! garbage collection. +//! +//! Another case that has to be considered is what happens if one process +//! deletes a finalized session directory that another process is currently +//! trying to copy from. This case is also handled via the lock file. Before +//! a process starts copying a finalized session directory, it will acquire a +//! shared lock on the directory's lock file. Any garbage collecting process, +//! 
on the other hand, will acquire an exclusive lock on the lock file. +//! Thus, if a directory is being collected, any reader process will fail +//! acquiring the shared lock and will leave the directory alone. Conversely, +//! if a collecting process can't acquire the exclusive lock because the +//! directory is currently being read from, it will leave collecting that +//! directory to another process at a later point in time. +//! The exact same scheme is also used when reading the metadata hashes file +//! from an extern crate. When a crate is compiled, the hash values of its +//! metadata are stored in a file in its session directory. When the +//! compilation session of another crate imports the first crate's metadata, +//! it also has to read in the accompanying metadata hashes. It thus will access +//! the finalized session directory of all crates it links to and while doing +//! so, it will also place a read lock on that the respective session directory +//! so that it won't be deleted while the metadata hashes are loaded. +//! +//! ## Preconditions +//! +//! This system relies on two features being available in the file system in +//! order to work really well: file locking and hard linking. +//! If hard linking is not available (like on FAT) the data in the cache +//! actually has to be copied at the beginning of each session. +//! If file locking does not work reliably (like on NFS), some of the +//! synchronization will go haywire. +//! In both cases we recommend to locate the incremental compilation directory +//! on a file system that supports these things. +//! It might be a good idea though to try and detect whether we are on an +//! unsupported file system and emit a warning in that case. This is not yet +//! implemented. + +use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; +use rustc::hir::svh::Svh; +use rustc::session::Session; +use rustc::ty::TyCtxt; +use rustc::util::fs as fs_util; +use rustc_data_structures::flock; +use rustc_data_structures::fnv::{FnvHashSet, FnvHashMap}; + +use std::ffi::OsString; +use std::fs as std_fs; +use std::io; +use std::mem; +use std::path::{Path, PathBuf}; +use std::time::{UNIX_EPOCH, SystemTime, Duration}; +use std::__rand::{thread_rng, Rng}; + +const LOCK_FILE_EXT: &'static str = ".lock"; +const DEP_GRAPH_FILENAME: &'static str = "dep-graph.bin"; +const WORK_PRODUCTS_FILENAME: &'static str = "work-products.bin"; +const METADATA_HASHES_FILENAME: &'static str = "metadata.bin"; + +pub fn dep_graph_path(sess: &Session) -> PathBuf { + in_incr_comp_dir_sess(sess, DEP_GRAPH_FILENAME) +} + +pub fn work_products_path(sess: &Session) -> PathBuf { + in_incr_comp_dir_sess(sess, WORK_PRODUCTS_FILENAME) +} + +pub fn metadata_hash_export_path(sess: &Session) -> PathBuf { + in_incr_comp_dir_sess(sess, METADATA_HASHES_FILENAME) +} + +pub fn metadata_hash_import_path(import_session_dir: &Path) -> PathBuf { + import_session_dir.join(METADATA_HASHES_FILENAME) +} + +pub fn lock_file_path(session_dir: &Path) -> PathBuf { + let crate_dir = session_dir.parent().unwrap(); + + let directory_name = session_dir.file_name().unwrap().to_string_lossy(); + assert_no_characters_lost(&directory_name); + + let dash_indices: Vec<_> = directory_name.match_indices("-") + .map(|(idx, _)| idx) + .collect(); + if dash_indices.len() != 3 { + bug!("Encountered incremental compilation session directory with \ + malformed name: {}", + session_dir.display()) + } + + crate_dir.join(&directory_name[0 .. 
dash_indices[2]]) + .with_extension(&LOCK_FILE_EXT[1..]) +} + +pub fn in_incr_comp_dir_sess(sess: &Session, file_name: &str) -> PathBuf { + in_incr_comp_dir(&sess.incr_comp_session_dir(), file_name) +} + +pub fn in_incr_comp_dir(incr_comp_session_dir: &Path, file_name: &str) -> PathBuf { + incr_comp_session_dir.join(file_name) +} + +/// Allocates the private session directory. The boolean in the Ok() result +/// indicates whether we should try loading a dep graph from the successfully +/// initialized directory, or not. +/// The post-condition of this fn is that we have a valid incremental +/// compilation session directory, if the result is `Ok`. A valid session +/// directory is one that contains a locked lock file. It may or may not contain +/// a dep-graph and work products from a previous session. +/// If the call fails, the fn may leave behind an invalid session directory. +/// The garbage collection will take care of it. +pub fn prepare_session_directory(tcx: TyCtxt) -> Result { + debug!("prepare_session_directory"); + + // {incr-comp-dir}/{crate-name-and-disambiguator} + let crate_dir = crate_path_tcx(tcx, LOCAL_CRATE); + debug!("crate-dir: {}", crate_dir.display()); + try!(create_dir(tcx.sess, &crate_dir, "crate")); + + let mut source_directories_already_tried = FnvHashSet(); + + loop { + // Generate a session directory of the form: + // + // {incr-comp-dir}/{crate-name-and-disambiguator}/s-{timestamp}-{random}-working + let session_dir = generate_session_dir_path(&crate_dir); + debug!("session-dir: {}", session_dir.display()); + + // Lock the new session directory. If this fails, return an + // error without retrying + let (directory_lock, lock_file_path) = try!(lock_directory(tcx.sess, &session_dir)); + + // Now that we have the lock, we can actually create the session + // directory + try!(create_dir(tcx.sess, &session_dir, "session")); + + // Find a suitable source directory to copy from. Ignore those that we + // have already tried before. + let source_directory = find_source_directory(&crate_dir, + &source_directories_already_tried); + + let source_directory = if let Some(dir) = source_directory { + dir + } else { + // There's nowhere to copy from, we're done + debug!("no source directory found. Continuing with empty session \ + directory."); + + tcx.sess.init_incr_comp_session(session_dir, directory_lock); + return Ok(false) + }; + + debug!("attempting to copy data from source: {}", + source_directory.display()); + + let print_file_copy_stats = tcx.sess.opts.debugging_opts.incremental_info; + + // Try copying over all files from the source directory + if copy_files(&session_dir, &source_directory, print_file_copy_stats).is_ok() { + debug!("successfully copied data from: {}", + source_directory.display()); + + tcx.sess.init_incr_comp_session(session_dir, directory_lock); + return Ok(true) + } else { + debug!("copying failed - trying next directory"); + + // Something went wrong while trying to copy/link files from the + // source directory. Try again with a different one. + source_directories_already_tried.insert(source_directory); + + // Try to remove the session directory we just allocated. We don't + // know if there's any garbage in it from the failed copy action. 
+ if let Err(err) = safe_remove_dir_all(&session_dir) { + tcx.sess.warn(&format!("Failed to delete partly initialized \ + session dir `{}`: {}", + session_dir.display(), + err)); + } + + delete_session_dir_lock_file(tcx.sess, &lock_file_path); + mem::drop(directory_lock); + } + } +} + + +/// This function finalizes and thus 'publishes' the session directory by +/// renaming it to `s-{timestamp}-{svh}` and releasing the file lock. +/// If there have been compilation errors, however, this function will just +/// delete the presumably invalid session directory. +pub fn finalize_session_directory(sess: &Session, svh: Svh) { + if sess.opts.incremental.is_none() { + return; + } + + let incr_comp_session_dir: PathBuf = sess.incr_comp_session_dir().clone(); + + if sess.has_errors() { + // If there have been any errors during compilation, we don't want to + // publish this session directory. Rather, we'll just delete it. + + debug!("finalize_session_directory() - invalidating session directory: {}", + incr_comp_session_dir.display()); + + if let Err(err) = safe_remove_dir_all(&*incr_comp_session_dir) { + sess.warn(&format!("Error deleting incremental compilation \ + session directory `{}`: {}", + incr_comp_session_dir.display(), + err)); + } + + let lock_file_path = lock_file_path(&*incr_comp_session_dir); + delete_session_dir_lock_file(sess, &lock_file_path); + sess.mark_incr_comp_session_as_invalid(); + } + + debug!("finalize_session_directory() - session directory: {}", + incr_comp_session_dir.display()); + + let old_sub_dir_name = incr_comp_session_dir.file_name() + .unwrap() + .to_string_lossy(); + assert_no_characters_lost(&old_sub_dir_name); + + // Keep the 's-{timestamp}-{random-number}' prefix, but replace the + // '-working' part with the SVH of the crate + let dash_indices: Vec<_> = old_sub_dir_name.match_indices("-") + .map(|(idx, _)| idx) + .collect(); + if dash_indices.len() != 3 { + bug!("Encountered incremental compilation session directory with \ + malformed name: {}", + incr_comp_session_dir.display()) + } + + // State: "s-{timestamp}-{random-number}-" + let mut new_sub_dir_name = String::from(&old_sub_dir_name[.. dash_indices[2] + 1]); + + // Append the svh + new_sub_dir_name.push_str(&encode_base_36(svh.as_u64())); + + // Create the full path + let new_path = incr_comp_session_dir.parent().unwrap().join(new_sub_dir_name); + debug!("finalize_session_directory() - new path: {}", new_path.display()); + + match std_fs::rename(&*incr_comp_session_dir, &new_path) { + Ok(_) => { + debug!("finalize_session_directory() - directory renamed successfully"); + + // This unlocks the directory + sess.finalize_incr_comp_session(new_path); + } + Err(e) => { + // Warn about the error. However, no need to abort compilation now. + sess.warn(&format!("Error finalizing incremental compilation \ + session directory `{}`: {}", + incr_comp_session_dir.display(), + e)); + + debug!("finalize_session_directory() - error, marking as invalid"); + // Drop the file lock, so we can garage collect + sess.mark_incr_comp_session_as_invalid(); + } + } + + let _ = garbage_collect_session_directories(sess); +} + +pub fn delete_all_session_dir_contents(sess: &Session) -> io::Result<()> { + let sess_dir_iterator = sess.incr_comp_session_dir().read_dir()?; + for entry in sess_dir_iterator { + let entry = entry?; + safe_remove_file(&entry.path())? 
+ } + Ok(()) +} + +fn copy_files(target_dir: &Path, + source_dir: &Path, + print_stats_on_success: bool) + -> Result<(), ()> { + // We acquire a shared lock on the lock file of the directory, so that + // nobody deletes it out from under us while we are reading from it. + let lock_file_path = lock_file_path(source_dir); + let _lock = if let Ok(lock) = flock::Lock::new(&lock_file_path, + false, // don't wait, + false, // don't create + false) { // not exclusive + lock + } else { + // Could not acquire the lock, don't try to copy from here + return Err(()) + }; + + let source_dir_iterator = match source_dir.read_dir() { + Ok(it) => it, + Err(_) => return Err(()) + }; + + let mut files_linked = 0; + let mut files_copied = 0; + + for entry in source_dir_iterator { + match entry { + Ok(entry) => { + let file_name = entry.file_name(); + + let target_file_path = target_dir.join(file_name); + let source_path = entry.path(); + + debug!("copying into session dir: {}", source_path.display()); + match fs_util::link_or_copy(source_path, target_file_path) { + Ok(fs_util::LinkOrCopy::Link) => { + files_linked += 1 + } + Ok(fs_util::LinkOrCopy::Copy) => { + files_copied += 1 + } + Err(_) => return Err(()) + } + } + Err(_) => { + return Err(()) + } + } + } + + if print_stats_on_success { + println!("incr. comp. session directory: {} files hard-linked", files_linked); + println!("incr. comp. session directory: {} files copied", files_copied); + } + + Ok(()) +} + +/// Generate unique directory path of the form: +/// {crate_dir}/s-{timestamp}-{random-number}-working +fn generate_session_dir_path(crate_dir: &Path) -> PathBuf { + let timestamp = timestamp_to_string(SystemTime::now()); + debug!("generate_session_dir_path: timestamp = {}", timestamp); + let random_number = thread_rng().next_u32(); + debug!("generate_session_dir_path: random_number = {}", random_number); + + let directory_name = format!("s-{}-{}-working", + timestamp, + encode_base_36(random_number as u64)); + debug!("generate_session_dir_path: directory_name = {}", directory_name); + let directory_path = crate_dir.join(directory_name); + debug!("generate_session_dir_path: directory_path = {}", directory_path.display()); + directory_path +} + +fn create_dir(sess: &Session, path: &Path, dir_tag: &str) -> Result<(),()> { + match fs_util::create_dir_racy(path) { + Ok(()) => { + debug!("{} directory created successfully", dir_tag); + Ok(()) + } + Err(err) => { + sess.err(&format!("Could not create incremental compilation {} \ + directory `{}`: {}", + dir_tag, + path.display(), + err)); + Err(()) + } + } +} + +/// Allocate a the lock-file and lock it. 
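generate_session_dir_path above builds names of the form s-{timestamp}-{random}-working, with both numbers written in base 36 and the timestamp measured in microseconds since the Unix epoch. A small, std-only sketch of that encoding and of recovering the timestamp from a directory name (helper names here are illustrative, not the patch's own API):

use std::time::{Duration, SystemTime, UNIX_EPOCH};

const BASE_36: &'static [u8] = b"0123456789abcdefghijklmnopqrstuvwxyz";

// Encode n in base 36: collect digits least-significant first, then reverse.
fn encode_base_36(mut n: u64) -> String {
    let mut digits = Vec::new();
    loop {
        digits.push(BASE_36[(n % 36) as usize]);
        n /= 36;
        if n == 0 { break; }
    }
    digits.reverse();
    String::from_utf8(digits).unwrap()
}

// Build an illustrative "-working" directory name from a timestamp and a
// stand-in random component.
fn working_dir_name(now: SystemTime, random: u64) -> String {
    let d = now.duration_since(UNIX_EPOCH).unwrap();
    let micros = d.as_secs() * 1_000_000 + (d.subsec_nanos() as u64) / 1000;
    format!("s-{}-{}-working", encode_base_36(micros), encode_base_36(random))
}

// Recover the timestamp stored between the first and second dash.
fn parse_timestamp(dir_name: &str) -> Option<SystemTime> {
    let mut parts = dir_name.split('-');
    let (_prefix, ts) = (parts.next()?, parts.next()?);
    let micros = u64::from_str_radix(ts, 36).ok()?;
    Some(UNIX_EPOCH + Duration::new(micros / 1_000_000,
                                    1000 * (micros % 1_000_000) as u32))
}

fn main() {
    let now = UNIX_EPOCH + Duration::new(1_478_866_000, 123_000);
    let name = working_dir_name(now, 42);
    assert_eq!(parse_timestamp(&name), Some(now));
}

Microsecond resolution keeps the encoded timestamp short while still making collisions between sessions of the same crate very unlikely once the random component is appended.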
+fn lock_directory(sess: &Session, + session_dir: &Path) + -> Result<(flock::Lock, PathBuf), ()> { + let lock_file_path = lock_file_path(session_dir); + debug!("lock_directory() - lock_file: {}", lock_file_path.display()); + + match flock::Lock::new(&lock_file_path, + false, // don't wait + true, // create the lock file + true) { // the lock should be exclusive + Ok(lock) => Ok((lock, lock_file_path)), + Err(err) => { + sess.err(&format!("incremental compilation: could not create \ + session directory lock file: {}", err)); + Err(()) + } + } +} + +fn delete_session_dir_lock_file(sess: &Session, + lock_file_path: &Path) { + if let Err(err) = safe_remove_file(&lock_file_path) { + sess.warn(&format!("Error deleting lock file for incremental \ + compilation session directory `{}`: {}", + lock_file_path.display(), + err)); + } +} + +/// Find the most recent published session directory that is not in the +/// ignore-list. +fn find_source_directory(crate_dir: &Path, + source_directories_already_tried: &FnvHashSet) + -> Option { + let iter = crate_dir.read_dir() + .unwrap() // FIXME + .filter_map(|e| e.ok().map(|e| e.path())); + + find_source_directory_in_iter(iter, source_directories_already_tried) +} + +fn find_source_directory_in_iter(iter: I, + source_directories_already_tried: &FnvHashSet) + -> Option + where I: Iterator +{ + let mut best_candidate = (UNIX_EPOCH, None); + + for session_dir in iter { + debug!("find_source_directory_in_iter - inspecting `{}`", + session_dir.display()); + + let directory_name = session_dir.file_name().unwrap().to_string_lossy(); + assert_no_characters_lost(&directory_name); + + if source_directories_already_tried.contains(&session_dir) || + !is_session_directory(&directory_name) || + !is_finalized(&directory_name) { + debug!("find_source_directory_in_iter - ignoring."); + continue + } + + let timestamp = extract_timestamp_from_session_dir(&directory_name) + .unwrap_or_else(|_| { + bug!("unexpected incr-comp session dir: {}", session_dir.display()) + }); + + if timestamp > best_candidate.0 { + best_candidate = (timestamp, Some(session_dir.clone())); + } + } + + best_candidate.1 +} + +fn is_finalized(directory_name: &str) -> bool { + !directory_name.ends_with("-working") +} + +fn is_session_directory(directory_name: &str) -> bool { + directory_name.starts_with("s-") && + !directory_name.ends_with(LOCK_FILE_EXT) +} + +fn is_session_directory_lock_file(file_name: &str) -> bool { + file_name.starts_with("s-") && file_name.ends_with(LOCK_FILE_EXT) +} + +fn extract_timestamp_from_session_dir(directory_name: &str) + -> Result { + if !is_session_directory(directory_name) { + return Err(()) + } + + let dash_indices: Vec<_> = directory_name.match_indices("-") + .map(|(idx, _)| idx) + .collect(); + if dash_indices.len() != 3 { + return Err(()) + } + + string_to_timestamp(&directory_name[dash_indices[0]+1 .. 
dash_indices[1]]) +} + +const BASE_36: &'static [u8] = b"0123456789abcdefghijklmnopqrstuvwxyz"; + +fn encode_base_36(mut n: u64) -> String { + let mut s = Vec::with_capacity(13); + loop { + s.push(BASE_36[(n % 36) as usize]); + n /= 36; + + if n == 0 { + break; + } + } + s.reverse(); + String::from_utf8(s).unwrap() +} + +fn timestamp_to_string(timestamp: SystemTime) -> String { + let duration = timestamp.duration_since(UNIX_EPOCH).unwrap(); + let micros = duration.as_secs() * 1_000_000 + + (duration.subsec_nanos() as u64) / 1000; + encode_base_36(micros) +} + +fn string_to_timestamp(s: &str) -> Result { + let micros_since_unix_epoch = u64::from_str_radix(s, 36); + + if micros_since_unix_epoch.is_err() { + return Err(()) + } + + let micros_since_unix_epoch = micros_since_unix_epoch.unwrap(); + + let duration = Duration::new(micros_since_unix_epoch / 1_000_000, + 1000 * (micros_since_unix_epoch % 1_000_000) as u32); + Ok(UNIX_EPOCH + duration) +} + +fn crate_path_tcx(tcx: TyCtxt, cnum: CrateNum) -> PathBuf { + crate_path(tcx.sess, &tcx.crate_name(cnum), &tcx.crate_disambiguator(cnum)) +} + +/// Finds the session directory containing the correct metadata hashes file for +/// the given crate. In order to do that it has to compute the crate directory +/// of the given crate, and in there, look for the session directory with the +/// correct SVH in it. +/// Note that we have to match on the exact SVH here, not just the +/// crate's (name, disambiguator) pair. The metadata hashes are only valid for +/// the exact version of the binary we are reading from now (i.e. the hashes +/// are part of the dependency graph of a specific compilation session). +pub fn find_metadata_hashes_for(tcx: TyCtxt, cnum: CrateNum) -> Option { + let crate_directory = crate_path_tcx(tcx, cnum); + + if !crate_directory.exists() { + return None + } + + let dir_entries = match crate_directory.read_dir() { + Ok(dir_entries) => dir_entries, + Err(e) => { + tcx.sess + .err(&format!("incremental compilation: Could not read crate directory `{}`: {}", + crate_directory.display(), e)); + return None + } + }; + + let target_svh = tcx.sess.cstore.crate_hash(cnum); + let target_svh = encode_base_36(target_svh.as_u64()); + + let sub_dir = find_metadata_hashes_iter(&target_svh, dir_entries.filter_map(|e| { + e.ok().map(|e| e.file_name().to_string_lossy().into_owned()) + })); + + sub_dir.map(|sub_dir_name| crate_directory.join(&sub_dir_name)) +} + +fn find_metadata_hashes_iter<'a, I>(target_svh: &str, iter: I) -> Option + where I: Iterator +{ + for sub_dir_name in iter { + if !is_session_directory(&sub_dir_name) || !is_finalized(&sub_dir_name) { + // This is not a usable session directory + continue + } + + let is_match = if let Some(last_dash_pos) = sub_dir_name.rfind("-") { + let candidate_svh = &sub_dir_name[last_dash_pos + 1 .. ]; + target_svh == candidate_svh + } else { + // some kind of invalid directory name + continue + }; + + if is_match { + return Some(OsString::from(sub_dir_name)) + } + } + + None +} + +fn crate_path(sess: &Session, + crate_name: &str, + crate_disambiguator: &str) + -> PathBuf { + use std::hash::{Hasher, Hash}; + use std::collections::hash_map::DefaultHasher; + + let incr_dir = sess.opts.incremental.as_ref().unwrap().clone(); + + // The full crate disambiguator is really long. A hash of it should be + // sufficient. 
+ let mut hasher = DefaultHasher::new(); + crate_disambiguator.hash(&mut hasher); + + let crate_name = format!("{}-{}", crate_name, encode_base_36(hasher.finish())); + incr_dir.join(crate_name) +} + +fn assert_no_characters_lost(s: &str) { + if s.contains('\u{FFFD}') { + bug!("Could not losslessly convert '{}'.", s) + } +} + +fn is_old_enough_to_be_collected(timestamp: SystemTime) -> bool { + timestamp < SystemTime::now() - Duration::from_secs(10) +} + +pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> { + debug!("garbage_collect_session_directories() - begin"); + + let session_directory = sess.incr_comp_session_dir(); + debug!("garbage_collect_session_directories() - session directory: {}", + session_directory.display()); + + let crate_directory = session_directory.parent().unwrap(); + debug!("garbage_collect_session_directories() - crate directory: {}", + crate_directory.display()); + + // First do a pass over the crate directory, collecting lock files and + // session directories + let mut session_directories = FnvHashSet(); + let mut lock_files = FnvHashSet(); + + for dir_entry in try!(crate_directory.read_dir()) { + let dir_entry = match dir_entry { + Ok(dir_entry) => dir_entry, + _ => { + // Ignore any errors + continue + } + }; + + let entry_name = dir_entry.file_name(); + let entry_name = entry_name.to_string_lossy(); + + if is_session_directory_lock_file(&entry_name) { + assert_no_characters_lost(&entry_name); + lock_files.insert(entry_name.into_owned()); + } else if is_session_directory(&entry_name) { + assert_no_characters_lost(&entry_name); + session_directories.insert(entry_name.into_owned()); + } else { + // This is something we don't know, leave it alone + } + } + + // Now map from lock files to session directories + let lock_file_to_session_dir: FnvHashMap> = + lock_files.into_iter() + .map(|lock_file_name| { + assert!(lock_file_name.ends_with(LOCK_FILE_EXT)); + let dir_prefix_end = lock_file_name.len() - LOCK_FILE_EXT.len(); + let session_dir = { + let dir_prefix = &lock_file_name[0 .. dir_prefix_end]; + session_directories.iter() + .find(|dir_name| dir_name.starts_with(dir_prefix)) + }; + (lock_file_name, session_dir.map(String::clone)) + }) + .collect(); + + // Delete all lock files, that don't have an associated directory. 
They must + // be some kind of leftover + for (lock_file_name, directory_name) in &lock_file_to_session_dir { + if directory_name.is_none() { + let timestamp = match extract_timestamp_from_session_dir(lock_file_name) { + Ok(timestamp) => timestamp, + Err(()) => { + debug!("Found lock-file with malformed timestamp: {}", + crate_directory.join(&lock_file_name).display()); + // Ignore it + continue + } + }; + + let lock_file_path = crate_directory.join(&**lock_file_name); + + if is_old_enough_to_be_collected(timestamp) { + debug!("garbage_collect_session_directories() - deleting \ + garbage lock file: {}", lock_file_path.display()); + delete_session_dir_lock_file(sess, &lock_file_path); + } else { + debug!("garbage_collect_session_directories() - lock file with \ + no session dir not old enough to be collected: {}", + lock_file_path.display()); + } + } + } + + // Filter out `None` directories + let lock_file_to_session_dir: FnvHashMap = + lock_file_to_session_dir.into_iter() + .filter_map(|(lock_file_name, directory_name)| { + directory_name.map(|n| (lock_file_name, n)) + }) + .collect(); + + let mut deletion_candidates = vec![]; + let mut definitely_delete = vec![]; + + for (lock_file_name, directory_name) in &lock_file_to_session_dir { + debug!("garbage_collect_session_directories() - inspecting: {}", + directory_name); + + let timestamp = match extract_timestamp_from_session_dir(directory_name) { + Ok(timestamp) => timestamp, + Err(()) => { + debug!("Found session-dir with malformed timestamp: {}", + crate_directory.join(directory_name).display()); + // Ignore it + continue + } + }; + + if is_finalized(directory_name) { + let lock_file_path = crate_directory.join(lock_file_name); + match flock::Lock::new(&lock_file_path, + false, // don't wait + false, // don't create the lock-file + true) { // get an exclusive lock + Ok(lock) => { + debug!("garbage_collect_session_directories() - \ + successfully acquired lock"); + debug!("garbage_collect_session_directories() - adding \ + deletion candidate: {}", directory_name); + + // Note that we are holding on to the lock + deletion_candidates.push((timestamp, + crate_directory.join(directory_name), + Some(lock))); + } + Err(_) => { + debug!("garbage_collect_session_directories() - \ + not collecting, still in use"); + } + } + } else if is_old_enough_to_be_collected(timestamp) { + // When cleaning out "-working" session directories, i.e. + // session directories that might still be in use by another + // compiler instance, we only look a directories that are + // at least ten seconds old. This is supposed to reduce the + // chance of deleting a directory in the time window where + // the process has allocated the directory but has not yet + // acquired the file-lock on it. + + // Try to acquire the directory lock. If we can't, it + // means that the owning process is still alive and we + // leave this directory alone. 
+ let lock_file_path = crate_directory.join(lock_file_name); + match flock::Lock::new(&lock_file_path, + false, // don't wait + false, // don't create the lock-file + true) { // get an exclusive lock + Ok(lock) => { + debug!("garbage_collect_session_directories() - \ + successfully acquired lock"); + + // Note that we are holding on to the lock + definitely_delete.push((crate_directory.join(directory_name), + Some(lock))); + } + Err(_) => { + debug!("garbage_collect_session_directories() - \ + not collecting, still in use"); + } + } + } else { + debug!("garbage_collect_session_directories() - not finalized, not \ + old enough"); + } + } + + // Delete all but the most recent of the candidates + for (path, lock) in all_except_most_recent(deletion_candidates) { + debug!("garbage_collect_session_directories() - deleting `{}`", + path.display()); + + if let Err(err) = safe_remove_dir_all(&path) { + sess.warn(&format!("Failed to garbage collect finalized incremental \ + compilation session directory `{}`: {}", + path.display(), + err)); + } else { + delete_session_dir_lock_file(sess, &lock_file_path(&path)); + } + + + // Let's make it explicit that the file lock is released at this point, + // or rather, that we held on to it until here + mem::drop(lock); + } + + for (path, lock) in definitely_delete { + debug!("garbage_collect_session_directories() - deleting `{}`", + path.display()); + + if let Err(err) = safe_remove_dir_all(&path) { + sess.warn(&format!("Failed to garbage collect incremental \ + compilation session directory `{}`: {}", + path.display(), + err)); + } else { + delete_session_dir_lock_file(sess, &lock_file_path(&path)); + } + + // Let's make it explicit that the file lock is released at this point, + // or rather, that we held on to it until here + mem::drop(lock); + } + + Ok(()) +} + +fn all_except_most_recent(deletion_candidates: Vec<(SystemTime, PathBuf, Option)>) + -> FnvHashMap> { + let most_recent = deletion_candidates.iter() + .map(|&(timestamp, ..)| timestamp) + .max(); + + if let Some(most_recent) = most_recent { + deletion_candidates.into_iter() + .filter(|&(timestamp, ..)| timestamp != most_recent) + .map(|(_, path, lock)| (path, lock)) + .collect() + } else { + FnvHashMap() + } +} + +/// Since paths of artifacts within session directories can get quite long, we +/// need to support deleting files with very long paths. The regular +/// WinApi functions only support paths up to 260 characters, however. In order +/// to circumvent this limitation, we canonicalize the path of the directory +/// before passing it to std::fs::remove_dir_all(). This will convert the path +/// into the '\\?\' format, which supports much longer paths. 
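The selection logic in all_except_most_recent above is what protects the newest finalized directory from garbage collection: only the candidate with the largest timestamp survives. A stripped-down sketch of the same idea, using std's HashMap in place of the compiler's FnvHashMap and dropping the lock handles, purely as an illustration:

use std::collections::HashMap;
use std::path::PathBuf;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

// Return every candidate except the one with the newest timestamp.
fn all_except_most_recent(candidates: Vec<(SystemTime, PathBuf)>)
                          -> HashMap<PathBuf, SystemTime> {
    let most_recent = candidates.iter().map(|&(t, _)| t).max();
    match most_recent {
        Some(newest) => candidates.into_iter()
                                  .filter(|&(t, _)| t != newest)
                                  .map(|(t, p)| (p, t))
                                  .collect(),
        None => HashMap::new(),
    }
}

fn main() {
    let t = |s| UNIX_EPOCH + Duration::from_secs(s);
    let doomed = all_except_most_recent(vec![
        (t(1), PathBuf::from("s-1-x")),
        (t(3), PathBuf::from("s-3-x")),
        (t(2), PathBuf::from("s-2-x")),
    ]);
    assert!(doomed.contains_key(&PathBuf::from("s-1-x")));
    assert!(!doomed.contains_key(&PathBuf::from("s-3-x")));
}

Note that, as in the patch, ties on the timestamp are conservative: if several directories share the newest timestamp, none of them is returned for deletion.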
+fn safe_remove_dir_all(p: &Path) -> io::Result<()> { + if p.exists() { + let canonicalized = try!(p.canonicalize()); + std_fs::remove_dir_all(canonicalized) + } else { + Ok(()) + } +} + +fn safe_remove_file(p: &Path) -> io::Result<()> { + if p.exists() { + let canonicalized = try!(p.canonicalize()); + std_fs::remove_file(canonicalized) + } else { + Ok(()) + } +} + +#[test] +fn test_all_except_most_recent() { + assert_eq!(all_except_most_recent( + vec![ + (UNIX_EPOCH + Duration::new(4, 0), PathBuf::from("4"), None), + (UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("1"), None), + (UNIX_EPOCH + Duration::new(5, 0), PathBuf::from("5"), None), + (UNIX_EPOCH + Duration::new(3, 0), PathBuf::from("3"), None), + (UNIX_EPOCH + Duration::new(2, 0), PathBuf::from("2"), None), + ]).keys().cloned().collect::>(), + vec![ + PathBuf::from("1"), + PathBuf::from("2"), + PathBuf::from("3"), + PathBuf::from("4"), + ].into_iter().collect::>() + ); + + assert_eq!(all_except_most_recent( + vec![ + ]).keys().cloned().collect::>(), + FnvHashSet() + ); +} + +#[test] +fn test_timestamp_serialization() { + for i in 0 .. 1_000u64 { + let time = UNIX_EPOCH + Duration::new(i * 1_434_578, (i as u32) * 239_000); + let s = timestamp_to_string(time); + assert_eq!(Ok(time), string_to_timestamp(&s)); + } +} + +#[test] +fn test_find_source_directory_in_iter() { + let already_visited = FnvHashSet(); + + // Find newest + assert_eq!(find_source_directory_in_iter( + vec![PathBuf::from("crate-dir/s-3234-0000-svh"), + PathBuf::from("crate-dir/s-2234-0000-svh"), + PathBuf::from("crate-dir/s-1234-0000-svh")].into_iter(), &already_visited), + Some(PathBuf::from("crate-dir/s-3234-0000-svh"))); + + // Filter out "-working" + assert_eq!(find_source_directory_in_iter( + vec![PathBuf::from("crate-dir/s-3234-0000-working"), + PathBuf::from("crate-dir/s-2234-0000-svh"), + PathBuf::from("crate-dir/s-1234-0000-svh")].into_iter(), &already_visited), + Some(PathBuf::from("crate-dir/s-2234-0000-svh"))); + + // Handle empty + assert_eq!(find_source_directory_in_iter(vec![].into_iter(), &already_visited), + None); + + // Handle only working + assert_eq!(find_source_directory_in_iter( + vec![PathBuf::from("crate-dir/s-3234-0000-working"), + PathBuf::from("crate-dir/s-2234-0000-working"), + PathBuf::from("crate-dir/s-1234-0000-working")].into_iter(), &already_visited), + None); +} + +#[test] +fn test_find_metadata_hashes_iter() +{ + assert_eq!(find_metadata_hashes_iter("testsvh2", + vec![ + String::from("s-timestamp1-testsvh1"), + String::from("s-timestamp2-testsvh2"), + String::from("s-timestamp3-testsvh3"), + ].into_iter()), + Some(OsString::from("s-timestamp2-testsvh2")) + ); + + assert_eq!(find_metadata_hashes_iter("testsvh2", + vec![ + String::from("s-timestamp1-testsvh1"), + String::from("s-timestamp2-testsvh2"), + String::from("invalid-name"), + ].into_iter()), + Some(OsString::from("s-timestamp2-testsvh2")) + ); + + assert_eq!(find_metadata_hashes_iter("testsvh2", + vec![ + String::from("s-timestamp1-testsvh1"), + String::from("s-timestamp2-testsvh2-working"), + String::from("s-timestamp3-testsvh3"), + ].into_iter()), + None + ); + + assert_eq!(find_metadata_hashes_iter("testsvh1", + vec![ + String::from("s-timestamp1-random1-working"), + String::from("s-timestamp2-random2-working"), + String::from("s-timestamp3-random3-working"), + ].into_iter()), + None + ); + + assert_eq!(find_metadata_hashes_iter("testsvh2", + vec![ + String::from("timestamp1-testsvh2"), + String::from("timestamp2-testsvh2"), + String::from("timestamp3-testsvh2"), + 
].into_iter()), + None + ); +} + +#[test] +fn test_encode_base_36() { + fn test(n: u64) { + assert_eq!(Ok(n), u64::from_str_radix(&encode_base_36(n)[..], 36)); + } + + test(0); + test(1); + test(35); + test(36); + test(37); + test(u64::max_value()); + + for i in 0 .. 1_000 { + test(i * 983); + } +} diff --git a/src/librustc_incremental/persist/hash.rs b/src/librustc_incremental/persist/hash.rs index 344b05f095..ca173db15f 100644 --- a/src/librustc_incremental/persist/hash.rs +++ b/src/librustc_incremental/persist/hash.rs @@ -8,32 +8,34 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use calculate_svh::SvhCalculate; -use rbml::Error; -use rbml::opaque::Decoder; use rustc::dep_graph::DepNode; -use rustc::hir::def_id::DefId; +use rustc::hir::def_id::{CrateNum, DefId}; use rustc::hir::svh::Svh; use rustc::ty::TyCtxt; use rustc_data_structures::fnv::FnvHashMap; +use rustc_data_structures::flock; use rustc_serialize::Decodable; -use std::io::{ErrorKind, Read}; -use std::fs::File; -use syntax::ast; +use rustc_serialize::opaque::Decoder; +use IncrementalHashesMap; use super::data::*; -use super::util::*; +use super::fs::*; +use super::file_format; pub struct HashContext<'a, 'tcx: 'a> { pub tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: &'a IncrementalHashesMap, item_metadata_hashes: FnvHashMap, - crate_hashes: FnvHashMap, + crate_hashes: FnvHashMap, } impl<'a, 'tcx> HashContext<'a, 'tcx> { - pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self { + pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: &'a IncrementalHashesMap) + -> Self { HashContext { tcx: tcx, + incremental_hashes_map: incremental_hashes_map, item_metadata_hashes: FnvHashMap(), crate_hashes: FnvHashMap(), } @@ -41,17 +43,32 @@ impl<'a, 'tcx> HashContext<'a, 'tcx> { pub fn is_hashable(dep_node: &DepNode) -> bool { match *dep_node { + DepNode::Krate | DepNode::Hir(_) => true, DepNode::MetaData(def_id) => !def_id.is_local(), _ => false, } } - pub fn hash(&mut self, dep_node: &DepNode) -> Option<(DefId, u64)> { + pub fn hash(&mut self, dep_node: &DepNode) -> Option { match *dep_node { + DepNode::Krate => { + Some(self.incremental_hashes_map[dep_node]) + } + // HIR nodes (which always come from our crate) are an input: DepNode::Hir(def_id) => { - Some((def_id, self.hir_hash(def_id))) + assert!(def_id.is_local(), + "cannot hash HIR for non-local def-id {:?} => {:?}", + def_id, + self.tcx.item_path_str(def_id)); + + assert!(!self.tcx.map.is_inlined_def_id(def_id), + "cannot hash HIR for inlined def-id {:?} => {:?}", + def_id, + self.tcx.item_path_str(def_id)); + + Some(self.incremental_hashes_map[dep_node]) } // MetaData from other crates is an *input* to us. @@ -59,7 +76,7 @@ impl<'a, 'tcx> HashContext<'a, 'tcx> { // don't hash them, but we do compute a hash for them and // save it for others to use. 
DepNode::MetaData(def_id) if !def_id.is_local() => { - Some((def_id, self.metadata_hash(def_id))) + Some(self.metadata_hash(def_id)) } _ => { @@ -72,21 +89,6 @@ impl<'a, 'tcx> HashContext<'a, 'tcx> { } } - fn hir_hash(&mut self, def_id: DefId) -> u64 { - assert!(def_id.is_local(), - "cannot hash HIR for non-local def-id {:?} => {:?}", - def_id, - self.tcx.item_path_str(def_id)); - - assert!(!self.tcx.map.is_inlined_def_id(def_id), - "cannot hash HIR for inlined def-id {:?} => {:?}", - def_id, - self.tcx.item_path_str(def_id)); - - // FIXME(#32753) -- should we use a distinct hash here - self.tcx.calculate_item_hash(def_id) - } - fn metadata_hash(&mut self, def_id: DefId) -> u64 { debug!("metadata_hash(def_id={:?})", def_id); @@ -116,7 +118,7 @@ impl<'a, 'tcx> HashContext<'a, 'tcx> { } } - fn load_data(&mut self, cnum: ast::CrateNum) { + fn load_data(&mut self, cnum: CrateNum) { debug!("load_data(cnum={})", cnum); let svh = self.tcx.sess.cstore.crate_hash(cnum); @@ -124,45 +126,72 @@ impl<'a, 'tcx> HashContext<'a, 'tcx> { debug!("load_data: svh={}", svh); assert!(old.is_none(), "loaded data for crate {:?} twice", cnum); - if let Some(path) = metadata_hash_path(self.tcx, cnum) { - debug!("load_data: path={:?}", path); - let mut data = vec![]; - match - File::open(&path) - .and_then(|mut file| file.read_to_end(&mut data)) + if let Some(session_dir) = find_metadata_hashes_for(self.tcx, cnum) { + debug!("load_data: session_dir={:?}", session_dir); + + // Lock the directory we'll be reading the hashes from. + let lock_file_path = lock_file_path(&session_dir); + let _lock = match flock::Lock::new(&lock_file_path, + false, // don't wait + false, // don't create the lock-file + false) { // shared lock + Ok(lock) => lock, + Err(err) => { + debug!("Could not acquire lock on `{}` while trying to \ + load metadata hashes: {}", + lock_file_path.display(), + err); + + // Could not acquire the lock. The directory is probably in + // in the process of being deleted. It's OK to just exit + // here. It's the same scenario as if the file had not + // existed in the first place. + return + } + }; + + let hashes_file_path = metadata_hash_import_path(&session_dir); + + match file_format::read_file(&hashes_file_path) { - Ok(_) => { - match self.load_from_data(cnum, &data) { + Ok(Some(data)) => { + match self.load_from_data(cnum, &data, svh) { Ok(()) => { } Err(err) => { bug!("decoding error in dep-graph from `{}`: {}", - path.display(), err); + &hashes_file_path.display(), err); } } } + Ok(None) => { + // If the file is not found, that's ok. + } Err(err) => { - match err.kind() { - ErrorKind::NotFound => { - // If the file is not found, that's ok. - } - _ => { - self.tcx.sess.err( - &format!("could not load dep information from `{}`: {}", - path.display(), err)); - return; - } - } + self.tcx.sess.err( + &format!("could not load dep information from `{}`: {}", + hashes_file_path.display(), err)); } } } } - fn load_from_data(&mut self, cnum: ast::CrateNum, data: &[u8]) -> Result<(), Error> { + fn load_from_data(&mut self, + cnum: CrateNum, + data: &[u8], + expected_svh: Svh) -> Result<(), String> { debug!("load_from_data(cnum={})", cnum); // Load up the hashes for the def-ids from this crate. let mut decoder = Decoder::new(data, 0); - let serialized_hashes = try!(SerializedMetadataHashes::decode(&mut decoder)); + let svh_in_hashes_file = Svh::decode(&mut decoder)?; + + if svh_in_hashes_file != expected_svh { + // We should not be able to get here. If we do, then + // `fs::find_metadata_hashes_for()` has messed up. 
+ bug!("mismatch between SVH in crate and SVH in incr. comp. hashes") + } + + let serialized_hashes = SerializedMetadataHashes::decode(&mut decoder)?; for serialized_hash in serialized_hashes.hashes { // the hashes are stored with just a def-index, which is // always relative to the old crate; convert that to use diff --git a/src/librustc_incremental/persist/load.rs b/src/librustc_incremental/persist/load.rs index c736437df1..db8d3125e5 100644 --- a/src/librustc_incremental/persist/load.rs +++ b/src/librustc_incremental/persist/load.rs @@ -10,23 +10,24 @@ //! Code to save/load the dep-graph from files. -use rbml::Error; -use rbml::opaque::Decoder; use rustc::dep_graph::DepNode; use rustc::hir::def_id::DefId; +use rustc::hir::svh::Svh; use rustc::session::Session; use rustc::ty::TyCtxt; -use rustc_data_structures::fnv::FnvHashSet; +use rustc_data_structures::fnv::{FnvHashSet, FnvHashMap}; use rustc_serialize::Decodable as RustcDecodable; -use std::io::Read; -use std::fs::{self, File}; +use rustc_serialize::opaque::Decoder; +use std::fs; use std::path::{Path}; +use IncrementalHashesMap; use super::data::*; use super::directory::*; use super::dirty_clean; use super::hash::*; -use super::util::*; +use super::fs::*; +use super::file_format; pub type DirtyNodes = FnvHashSet>; @@ -38,29 +39,49 @@ type CleanEdges = Vec<(DepNode, DepNode)>; /// early in compilation, before we've really done any work, but /// actually it doesn't matter all that much.) See `README.md` for /// more general overview. -pub fn load_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { +pub fn load_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: &IncrementalHashesMap) { if tcx.sess.opts.incremental.is_none() { return; } + match prepare_session_directory(tcx) { + Ok(true) => { + // We successfully allocated a session directory and there is + // something in it to load, so continue + } + Ok(false) => { + // We successfully allocated a session directory, but there is no + // dep-graph data in it to load (because this is the first + // compilation session with this incr. comp. dir.) + return + } + Err(()) => { + // Something went wrong while trying to allocate the session + // directory. Don't try to use it any further. 
+ return + } + } + let _ignore = tcx.dep_graph.in_ignore(); - load_dep_graph_if_exists(tcx); + load_dep_graph_if_exists(tcx, incremental_hashes_map); } -fn load_dep_graph_if_exists<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - let dep_graph_path = dep_graph_path(tcx).unwrap(); +fn load_dep_graph_if_exists<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: &IncrementalHashesMap) { + let dep_graph_path = dep_graph_path(tcx.sess); let dep_graph_data = match load_data(tcx.sess, &dep_graph_path) { Some(p) => p, None => return // no file }; - let work_products_path = tcx_work_products_path(tcx).unwrap(); + let work_products_path = work_products_path(tcx.sess); let work_products_data = match load_data(tcx.sess, &work_products_path) { Some(p) => p, None => return // no file }; - match decode_dep_graph(tcx, &dep_graph_data, &work_products_data) { + match decode_dep_graph(tcx, incremental_hashes_map, &dep_graph_data, &work_products_data) { Ok(dirty_nodes) => dirty_nodes, Err(err) => { tcx.sess.warn( @@ -73,41 +94,43 @@ fn load_dep_graph_if_exists<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { } fn load_data(sess: &Session, path: &Path) -> Option> { - if !path.exists() { - return None; - } - - let mut data = vec![]; - match - File::open(path) - .and_then(|mut file| file.read_to_end(&mut data)) - { - Ok(_) => { - Some(data) + match file_format::read_file(path) { + Ok(Some(data)) => return Some(data), + Ok(None) => { + // The file either didn't exist or was produced by an incompatible + // compiler version. Neither is an error. } Err(err) => { sess.err( &format!("could not load dep-graph from `{}`: {}", path.display(), err)); - None } } + + if let Err(err) = delete_all_session_dir_contents(sess) { + sess.err(&format!("could not clear incompatible incremental \ + compilation session directory `{}`: {}", + path.display(), err)); + } + + None } /// Decode the dep graph and load the edges/nodes that are still clean /// into `tcx.dep_graph`. pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: &IncrementalHashesMap, dep_graph_data: &[u8], work_products_data: &[u8]) - -> Result<(), Error> + -> Result<(), String> { // Decode the list of work_products let mut work_product_decoder = Decoder::new(work_products_data, 0); - let work_products = try!(>::decode(&mut work_product_decoder)); + let work_products = >::decode(&mut work_product_decoder)?; // Deserialize the directory and dep-graph. let mut dep_graph_decoder = Decoder::new(dep_graph_data, 0); - let prev_commandline_args_hash = try!(u64::decode(&mut dep_graph_decoder)); + let prev_commandline_args_hash = u64::decode(&mut dep_graph_decoder)?; if prev_commandline_args_hash != tcx.sess.opts.dep_tracking_hash() { // We can't reuse the cache, purge it. @@ -120,8 +143,8 @@ pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, return Ok(()); } - let directory = try!(DefIdDirectory::decode(&mut dep_graph_decoder)); - let serialized_dep_graph = try!(SerializedDepGraph::decode(&mut dep_graph_decoder)); + let directory = DefIdDirectory::decode(&mut dep_graph_decoder)?; + let serialized_dep_graph = SerializedDepGraph::decode(&mut dep_graph_decoder)?; // Retrace the paths in the directory to find their current location (if any). let retraced = directory.retrace(tcx); @@ -133,7 +156,10 @@ pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // reason for this is that this way we can include nodes that have // been removed (which no longer have a `DefId` in the current // compilation). 
- let dirty_raw_source_nodes = dirty_nodes(tcx, &serialized_dep_graph.hashes, &retraced); + let dirty_raw_source_nodes = dirty_nodes(tcx, + incremental_hashes_map, + &serialized_dep_graph.hashes, + &retraced); // Create a list of (raw-source-node -> // retracted-target-node) edges. In the process of retracing the @@ -200,22 +226,29 @@ pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, dirty_clean::check_dirty_clean_annotations(tcx, &dirty_raw_source_nodes, &retraced); + load_prev_metadata_hashes(tcx, + &retraced, + &mut *incremental_hashes_map.prev_metadata_hashes.borrow_mut()); Ok(()) } /// Computes which of the original set of def-ids are dirty. Stored in /// a bit vector where the index is the DefPathIndex. fn dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - hashes: &[SerializedHash], + incremental_hashes_map: &IncrementalHashesMap, + serialized_hashes: &[SerializedHash], retraced: &RetracedDefIdDirectory) -> DirtyNodes { - let mut hcx = HashContext::new(tcx); + let mut hcx = HashContext::new(tcx, incremental_hashes_map); let mut dirty_nodes = FnvHashSet(); - for hash in hashes { + for hash in serialized_hashes { if let Some(dep_node) = retraced.map(&hash.dep_node) { - let (_, current_hash) = hcx.hash(&dep_node).unwrap(); + let current_hash = hcx.hash(&dep_node).unwrap(); if current_hash == hash.hash { + debug!("initial_dirty_nodes: {:?} is clean (hash={:?})", + dep_node.map_def(|&def_id| Some(tcx.def_path(def_id))).unwrap(), + current_hash); continue; } debug!("initial_dirty_nodes: {:?} is dirty as hash is {:?}, was {:?}", @@ -250,7 +283,7 @@ fn reconcile_work_products<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, .saved_files .iter() .all(|&(_, ref file_name)| { - let path = in_incr_comp_dir(tcx.sess, &file_name).unwrap(); + let path = in_incr_comp_dir_sess(tcx.sess, &file_name); path.exists() }); if all_files_exist { @@ -268,7 +301,7 @@ fn delete_dirty_work_product(tcx: TyCtxt, swp: SerializedWorkProduct) { debug!("delete_dirty_work_product({:?})", swp); for &(_, ref file_name) in &swp.work_product.saved_files { - let path = in_incr_comp_dir(tcx.sess, file_name).unwrap(); + let path = in_incr_comp_dir_sess(tcx.sess, file_name); match fs::remove_file(&path) { Ok(()) => { } Err(err) => { @@ -279,3 +312,57 @@ fn delete_dirty_work_product(tcx: TyCtxt, } } } + +fn load_prev_metadata_hashes(tcx: TyCtxt, + retraced: &RetracedDefIdDirectory, + output: &mut FnvHashMap) { + if !tcx.sess.opts.debugging_opts.query_dep_graph { + return + } + + debug!("load_prev_metadata_hashes() - Loading previous metadata hashes"); + + let file_path = metadata_hash_export_path(tcx.sess); + + if !file_path.exists() { + debug!("load_prev_metadata_hashes() - Couldn't find file containing \ + hashes at `{}`", file_path.display()); + return + } + + debug!("load_prev_metadata_hashes() - File: {}", file_path.display()); + + let data = match file_format::read_file(&file_path) { + Ok(Some(data)) => data, + Ok(None) => { + debug!("load_prev_metadata_hashes() - File produced by incompatible \ + compiler version: {}", file_path.display()); + return + } + Err(err) => { + debug!("load_prev_metadata_hashes() - Error reading file `{}`: {}", + file_path.display(), err); + return + } + }; + + debug!("load_prev_metadata_hashes() - Decoding hashes"); + let mut decoder = Decoder::new(&data, 0); + let _ = Svh::decode(&mut decoder).unwrap(); + let serialized_hashes = SerializedMetadataHashes::decode(&mut decoder).unwrap(); + + debug!("load_prev_metadata_hashes() - Mapping DefIds"); + + 
assert_eq!(serialized_hashes.index_map.len(), serialized_hashes.hashes.len()); + for serialized_hash in serialized_hashes.hashes { + let def_path_index = serialized_hashes.index_map[&serialized_hash.def_index]; + if let Some(def_id) = retraced.def_id(def_path_index) { + let old = output.insert(def_id, serialized_hash.hash); + assert!(old.is_none(), "already have hash for {:?}", def_id); + } + } + + debug!("load_prev_metadata_hashes() - successfully loaded {} hashes", + serialized_hashes.index_map.len()); +} + diff --git a/src/librustc_incremental/persist/mod.rs b/src/librustc_incremental/persist/mod.rs index 4a042497e0..26fcde0586 100644 --- a/src/librustc_incremental/persist/mod.rs +++ b/src/librustc_incremental/persist/mod.rs @@ -15,15 +15,17 @@ mod data; mod directory; mod dirty_clean; +mod fs; mod hash; mod load; mod preds; mod save; -mod util; mod work_product; +mod file_format; +pub use self::fs::finalize_session_directory; +pub use self::fs::in_incr_comp_dir; pub use self::load::load_dep_graph; pub use self::save::save_dep_graph; pub use self::save::save_work_products; pub use self::work_product::save_trans_partition; -pub use self::util::in_incr_comp_dir; diff --git a/src/librustc_incremental/persist/preds.rs b/src/librustc_incremental/persist/preds.rs index a82951afcb..af13484e42 100644 --- a/src/librustc_incremental/persist/preds.rs +++ b/src/librustc_incremental/persist/preds.rs @@ -62,7 +62,7 @@ impl<'q> Predecessors<'q> { let mut hashes = FnvHashMap(); for input in inputs.values().flat_map(|v| v.iter().cloned()) { hashes.entry(input) - .or_insert_with(|| hcx.hash(input).unwrap().1); + .or_insert_with(|| hcx.hash(input).unwrap()); } Predecessors { diff --git a/src/librustc_incremental/persist/save.rs b/src/librustc_incremental/persist/save.rs index a9523a81fb..bc542b71ac 100644 --- a/src/librustc_incremental/persist/save.rs +++ b/src/librustc_incremental/persist/save.rs @@ -8,65 +8,91 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use rbml::opaque::Encoder; use rustc::dep_graph::DepNode; use rustc::hir::def_id::DefId; -use rustc::middle::cstore::LOCAL_CRATE; +use rustc::hir::svh::Svh; use rustc::session::Session; use rustc::ty::TyCtxt; use rustc_data_structures::fnv::FnvHashMap; use rustc_serialize::Encodable as RustcEncodable; -use std::hash::{Hash, Hasher, SipHasher}; +use rustc_serialize::opaque::Encoder; +use std::hash::{Hash, Hasher}; use std::io::{self, Cursor, Write}; use std::fs::{self, File}; use std::path::PathBuf; +use std::collections::hash_map::DefaultHasher; +use IncrementalHashesMap; use super::data::*; use super::directory::*; use super::hash::*; use super::preds::*; -use super::util::*; +use super::fs::*; +use super::dirty_clean; +use super::file_format; -pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { +pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: &IncrementalHashesMap, + svh: Svh) { debug!("save_dep_graph()"); let _ignore = tcx.dep_graph.in_ignore(); let sess = tcx.sess; if sess.opts.incremental.is_none() { return; } - let mut hcx = HashContext::new(tcx); + let mut builder = DefIdDirectoryBuilder::new(tcx); let query = tcx.dep_graph.query(); + let mut hcx = HashContext::new(tcx, incremental_hashes_map); let preds = Predecessors::new(&query, &mut hcx); + let mut current_metadata_hashes = FnvHashMap(); + + // IMPORTANT: We are saving the metadata hashes *before* the dep-graph, + // since metadata-encoding might add new entries to the + // DefIdDirectory (which is saved in the dep-graph file). save_in(sess, - dep_graph_path(tcx), - |e| encode_dep_graph(&preds, &mut builder, e)); + metadata_hash_export_path(sess), + |e| encode_metadata_hashes(tcx, + svh, + &preds, + &mut builder, + &mut current_metadata_hashes, + e)); save_in(sess, - metadata_hash_path(tcx, LOCAL_CRATE), - |e| encode_metadata_hashes(tcx, &preds, &mut builder, e)); + dep_graph_path(sess), + |e| encode_dep_graph(&preds, &mut builder, e)); + + let prev_metadata_hashes = incremental_hashes_map.prev_metadata_hashes.borrow(); + dirty_clean::check_dirty_clean_metadata(tcx, + &*prev_metadata_hashes, + ¤t_metadata_hashes); } -pub fn save_work_products(sess: &Session, local_crate_name: &str) { +pub fn save_work_products(sess: &Session) { + if sess.opts.incremental.is_none() { + return; + } + debug!("save_work_products()"); let _ignore = sess.dep_graph.in_ignore(); - let path = sess_work_products_path(sess, local_crate_name); + let path = work_products_path(sess); save_in(sess, path, |e| encode_work_products(sess, e)); } -fn save_in(sess: &Session, opt_path_buf: Option, encode: F) +fn save_in(sess: &Session, path_buf: PathBuf, encode: F) where F: FnOnce(&mut Encoder) -> io::Result<()> { - let path_buf = match opt_path_buf { - Some(p) => p, - None => return, - }; - - // FIXME(#32754) lock file? 
+ debug!("save: storing data in {}", path_buf.display()); // delete the old dep-graph, if any + // Note: It's important that we actually delete the old file and not just + // truncate and overwrite it, since it might be a shared hard-link, the + // underlying data of which we don't want to modify if path_buf.exists() { match fs::remove_file(&path_buf) { - Ok(()) => {} + Ok(()) => { + debug!("save: remove old file"); + } Err(err) => { sess.err(&format!("unable to delete old dep-graph at `{}`: {}", path_buf.display(), @@ -78,6 +104,7 @@ fn save_in(sess: &Session, opt_path_buf: Option, encode: F) // generate the data in a memory buffer let mut wr = Cursor::new(Vec::new()); + file_format::write_file_header(&mut wr).unwrap(); match encode(&mut Encoder::new(&mut wr)) { Ok(()) => {} Err(err) => { @@ -91,7 +118,9 @@ fn save_in(sess: &Session, opt_path_buf: Option, encode: F) // write the data out let data = wr.into_inner(); match File::create(&path_buf).and_then(|mut file| file.write_all(&data)) { - Ok(_) => {} + Ok(_) => { + debug!("save: data written to disk successfully"); + } Err(err) => { sess.err(&format!("failed to write dep-graph to `{}`: {}", path_buf.display(), @@ -107,7 +136,7 @@ pub fn encode_dep_graph(preds: &Predecessors, -> io::Result<()> { // First encode the commandline arguments hash let tcx = builder.tcx(); - try!(tcx.sess.opts.dep_tracking_hash().encode(encoder)); + tcx.sess.opts.dep_tracking_hash().encode(encoder)?; // Create a flat list of (Input, WorkProduct) edges for // serialization. @@ -146,27 +175,19 @@ pub fn encode_dep_graph(preds: &Predecessors, debug!("graph = {:#?}", graph); // Encode the directory and then the graph data. - try!(builder.directory().encode(encoder)); - try!(graph.encode(encoder)); + builder.directory().encode(encoder)?; + graph.encode(encoder)?; Ok(()) } pub fn encode_metadata_hashes(tcx: TyCtxt, + svh: Svh, preds: &Predecessors, builder: &mut DefIdDirectoryBuilder, + current_metadata_hashes: &mut FnvHashMap, encoder: &mut Encoder) -> io::Result<()> { - let mut def_id_hashes = FnvHashMap(); - let mut def_id_hash = |def_id: DefId| -> u64 { - *def_id_hashes.entry(def_id) - .or_insert_with(|| { - let index = builder.add(def_id); - let path = builder.lookup_def_path(index); - path.deterministic_hash(tcx) - }) - }; - // For each `MetaData(X)` node where `X` is local, accumulate a // hash. These are the metadata items we export. Downstream // crates will want to see a hash that tells them whether we might @@ -174,7 +195,13 @@ pub fn encode_metadata_hashes(tcx: TyCtxt, // compiled. // // (I initially wrote this with an iterator, but it seemed harder to read.) - let mut serialized_hashes = SerializedMetadataHashes { hashes: vec![] }; + let mut serialized_hashes = SerializedMetadataHashes { + hashes: vec![], + index_map: FnvHashMap() + }; + + let mut def_id_hashes = FnvHashMap(); + for (&target, sources) in &preds.inputs { let def_id = match *target { DepNode::MetaData(def_id) => { @@ -184,6 +211,15 @@ pub fn encode_metadata_hashes(tcx: TyCtxt, _ => continue, }; + let mut def_id_hash = |def_id: DefId| -> u64 { + *def_id_hashes.entry(def_id) + .or_insert_with(|| { + let index = builder.add(def_id); + let path = builder.lookup_def_path(index); + path.deterministic_hash(tcx) + }) + }; + // To create the hash for each item `X`, we don't hash the raw // bytes of the metadata (though in principle we // could). 
Instead, we walk the predecessors of `MetaData(X)` @@ -206,7 +242,7 @@ pub fn encode_metadata_hashes(tcx: TyCtxt, .collect(); hashes.sort(); - let mut state = SipHasher::new(); + let mut state = DefaultHasher::new(); hashes.hash(&mut state); let hash = state.finish(); @@ -217,8 +253,25 @@ pub fn encode_metadata_hashes(tcx: TyCtxt, }); } + if tcx.sess.opts.debugging_opts.query_dep_graph { + for serialized_hash in &serialized_hashes.hashes { + let def_id = DefId::local(serialized_hash.def_index); + + // Store entry in the index_map + let def_path_index = builder.add(def_id); + serialized_hashes.index_map.insert(def_id.index, def_path_index); + + // Record hash in current_metadata_hashes + current_metadata_hashes.insert(def_id, serialized_hash.hash); + } + + debug!("save: stored index_map (len={}) for serialized hashes", + serialized_hashes.index_map.len()); + } + // Encode everything. - try!(serialized_hashes.encode(encoder)); + svh.encode(encoder)?; + serialized_hashes.encode(encoder)?; Ok(()) } diff --git a/src/librustc_incremental/persist/util.rs b/src/librustc_incremental/persist/util.rs deleted file mode 100644 index f1e81fdb26..0000000000 --- a/src/librustc_incremental/persist/util.rs +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::middle::cstore::LOCAL_CRATE; -use rustc::session::Session; -use rustc::ty::TyCtxt; - -use std::fs; -use std::io; -use std::path::{Path, PathBuf}; -use syntax::ast; - -pub fn dep_graph_path(tcx: TyCtxt) -> Option { - tcx_path(tcx, LOCAL_CRATE, "local") -} - -pub fn metadata_hash_path(tcx: TyCtxt, cnum: ast::CrateNum) -> Option { - tcx_path(tcx, cnum, "metadata") -} - -pub fn tcx_work_products_path(tcx: TyCtxt) -> Option { - let crate_name = tcx.crate_name(LOCAL_CRATE); - sess_work_products_path(tcx.sess, &crate_name) -} - -pub fn sess_work_products_path(sess: &Session, - local_crate_name: &str) - -> Option { - let crate_disambiguator = sess.local_crate_disambiguator(); - path(sess, local_crate_name, &crate_disambiguator, "work-products") -} - -pub fn in_incr_comp_dir(sess: &Session, file_name: &str) -> Option { - sess.opts.incremental.as_ref().map(|incr_dir| incr_dir.join(file_name)) -} - -fn tcx_path(tcx: TyCtxt, - cnum: ast::CrateNum, - middle: &str) - -> Option { - path(tcx.sess, &tcx.crate_name(cnum), &tcx.crate_disambiguator(cnum), middle) -} - -fn path(sess: &Session, - crate_name: &str, - crate_disambiguator: &str, - middle: &str) - -> Option { - // For now, just save/load dep-graph from - // directory/dep_graph.rbml - sess.opts.incremental.as_ref().and_then(|incr_dir| { - match create_dir_racy(&incr_dir) { - Ok(()) => {} - Err(err) => { - sess.err( - &format!("could not create the directory `{}`: {}", - incr_dir.display(), err)); - return None; - } - } - - let file_name = format!("{}-{}.{}.bin", crate_name, crate_disambiguator, middle); - - Some(incr_dir.join(file_name)) - }) -} - -// Like std::fs::create_dir_all, except handles concurrent calls among multiple -// threads or processes. 
-fn create_dir_racy(path: &Path) -> io::Result<()> { - match fs::create_dir(path) { - Ok(()) => return Ok(()), - Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => return Ok(()), - Err(ref e) if e.kind() == io::ErrorKind::NotFound => {} - Err(e) => return Err(e), - } - match path.parent() { - Some(p) => try!(create_dir_racy(p)), - None => return Err(io::Error::new(io::ErrorKind::Other, - "failed to create whole tree")), - } - match fs::create_dir(path) { - Ok(()) => Ok(()), - Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok(()), - Err(e) => Err(e), - } -} - diff --git a/src/librustc_incremental/persist/work_product.rs b/src/librustc_incremental/persist/work_product.rs index c106ea8f26..a9ebd27ce9 100644 --- a/src/librustc_incremental/persist/work_product.rs +++ b/src/librustc_incremental/persist/work_product.rs @@ -10,7 +10,7 @@ //! This module contains files for saving intermediate work-products. -use persist::util::*; +use persist::fs::*; use rustc::dep_graph::{WorkProduct, WorkProductId}; use rustc::session::Session; use rustc::session::config::OutputType; @@ -35,7 +35,7 @@ pub fn save_trans_partition(sess: &Session, files.iter() .map(|&(kind, ref path)| { let file_name = format!("cgu-{}.{}", cgu_name, kind.extension()); - let path_in_incr_dir = in_incr_comp_dir(sess, &file_name).unwrap(); + let path_in_incr_dir = in_incr_comp_dir_sess(sess, &file_name); match link_or_copy(path, &path_in_incr_dir) { Ok(_) => Some((kind, file_name)), Err(err) => { diff --git a/src/librustc_lint/bad_style.rs b/src/librustc_lint/bad_style.rs index 15914838ac..84d65308f9 100644 --- a/src/librustc_lint/bad_style.rs +++ b/src/librustc_lint/bad_style.rs @@ -14,7 +14,7 @@ use lint::{LateContext, LintContext, LintArray}; use lint::{LintPass, LateLintPass}; use syntax::ast; -use syntax::attr::{self, AttrMetaMethods}; +use syntax::attr; use syntax_pos::Span; use rustc::hir::{self, PatKind}; @@ -111,7 +111,7 @@ impl LateLintPass for NonCamelCaseTypes { } match it.node { - hir::ItemTy(..) | hir::ItemStruct(..) => { + hir::ItemTy(..) | hir::ItemStruct(..) | hir::ItemUnion(..) => { self.check_case(cx, "type", it.name, it.span) } hir::ItemTrait(..) => { @@ -239,7 +239,7 @@ impl LateLintPass for NonSnakeCase { fk: FnKind, _: &hir::FnDecl, _: &hir::Block, span: Span, id: ast::NodeId) { match fk { - FnKind::Method(name, _, _, _) => match method_context(cx, id, span) { + FnKind::Method(name, ..) => match method_context(cx, id, span) { MethodLateContext::PlainImpl => { self.check_snake_case(cx, "method", &name.as_str(), Some(span)) }, @@ -248,7 +248,7 @@ impl LateLintPass for NonSnakeCase { }, _ => (), }, - FnKind::ItemFn(name, _, _, _, _, _, _) => { + FnKind::ItemFn(name, ..) => { self.check_snake_case(cx, "function", &name.as_str(), Some(span)) }, FnKind::Closure(_) => (), diff --git a/src/librustc_lint/builtin.rs b/src/librustc_lint/builtin.rs index ed17f3533d..b610a924a3 100644 --- a/src/librustc_lint/builtin.rs +++ b/src/librustc_lint/builtin.rs @@ -44,8 +44,8 @@ use lint::{LintPass, LateLintPass}; use std::collections::HashSet; use syntax::{ast}; -use syntax::attr::{self, AttrMetaMethods, AttributeMethods}; -use syntax_pos::{self, Span}; +use syntax::attr; +use syntax_pos::{Span}; use rustc::hir::{self, PatKind}; use rustc::hir::intravisit::FnKind; @@ -72,7 +72,7 @@ impl LintPass for WhileTrue { impl LateLintPass for WhileTrue { fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) { - if let hir::ExprWhile(ref cond, _, _) = e.node { + if let hir::ExprWhile(ref cond, ..) 
= e.node { if let hir::ExprLit(ref lit) = cond.node { if let ast::LitKind::Bool(true) = lit.node { cx.span_lint(WHILE_TRUE, e.span, @@ -116,7 +116,8 @@ impl LateLintPass for BoxPointers { hir::ItemFn(..) | hir::ItemTy(..) | hir::ItemEnum(..) | - hir::ItemStruct(..) => + hir::ItemStruct(..) | + hir::ItemUnion(..) => self.check_heap_type(cx, it.span, cx.tcx.node_id_to_type(it.id)), _ => () @@ -124,7 +125,8 @@ impl LateLintPass for BoxPointers { // If it's a struct, we also have to check the fields' types match it.node { - hir::ItemStruct(ref struct_def, _) => { + hir::ItemStruct(ref struct_def, _) | + hir::ItemUnion(ref struct_def, _) => { for struct_field in struct_def.fields() { self.check_heap_type(cx, struct_field.span, cx.tcx.node_id_to_type(struct_field.id)); @@ -201,10 +203,10 @@ impl LateLintPass for UnsafeCode { fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { match it.node { - hir::ItemTrait(hir::Unsafety::Unsafe, _, _, _) => + hir::ItemTrait(hir::Unsafety::Unsafe, ..) => cx.span_lint(UNSAFE_CODE, it.span, "declaration of an `unsafe` trait"), - hir::ItemImpl(hir::Unsafety::Unsafe, _, _, _, _, _) => + hir::ItemImpl(hir::Unsafety::Unsafe, ..) => cx.span_lint(UNSAFE_CODE, it.span, "implementation of an `unsafe` trait"), _ => return, @@ -214,10 +216,10 @@ impl LateLintPass for UnsafeCode { fn check_fn(&mut self, cx: &LateContext, fk: FnKind, _: &hir::FnDecl, _: &hir::Block, span: Span, _: ast::NodeId) { match fk { - FnKind::ItemFn(_, _, hir::Unsafety::Unsafe, _, _, _, _) => + FnKind::ItemFn(_, _, hir::Unsafety::Unsafe, ..) => cx.span_lint(UNSAFE_CODE, span, "declaration of an `unsafe` function"), - FnKind::Method(_, sig, _, _) => { + FnKind::Method(_, sig, ..) => { if sig.unsafety == hir::Unsafety::Unsafe { cx.span_lint(UNSAFE_CODE, span, "implementation of an `unsafe` method") } @@ -317,7 +319,7 @@ impl LateLintPass for MissingDoc { let doc_hidden = self.doc_hidden() || attrs.iter().any(|attr| { attr.check_name("doc") && match attr.meta_item_list() { None => false, - Some(l) => attr::contains_name(&l[..], "hidden"), + Some(l) => attr::list_contains_name(&l[..], "hidden"), } }); self.doc_hidden_stack.push(doc_hidden); @@ -348,7 +350,8 @@ impl LateLintPass for MissingDoc { hir::ItemMod(..) => "a module", hir::ItemEnum(..) => "an enum", hir::ItemStruct(..) => "a struct", - hir::ItemTrait(_, _, _, ref items) => { + hir::ItemUnion(..) => "a union", + hir::ItemTrait(.., ref items) => { // Issue #11592, traits are always considered exported, even when private. if it.vis == hir::Visibility::Inherited { self.private_traits.insert(it.id); @@ -360,7 +363,7 @@ impl LateLintPass for MissingDoc { "a trait" }, hir::ItemTy(..) => "a type alias", - hir::ItemImpl(_, _, _, Some(ref trait_ref), _, ref impl_items) => { + hir::ItemImpl(.., Some(ref trait_ref), _, ref impl_items) => { // If the trait is private, add the impl items to private_traits so they don't get // reported for missing docs. 
let real_trait = cx.tcx.expect_def(trait_ref.ref_id).def_id(); @@ -465,16 +468,21 @@ impl LateLintPass for MissingCopyImplementations { return; } let def = cx.tcx.lookup_adt_def(cx.tcx.map.local_def_id(item.id)); - (def, cx.tcx.mk_struct(def, - cx.tcx.mk_substs(Substs::empty()))) + (def, cx.tcx.mk_adt(def, Substs::empty(cx.tcx))) + } + hir::ItemUnion(_, ref ast_generics) => { + if ast_generics.is_parameterized() { + return; + } + let def = cx.tcx.lookup_adt_def(cx.tcx.map.local_def_id(item.id)); + (def, cx.tcx.mk_adt(def, Substs::empty(cx.tcx))) } hir::ItemEnum(_, ref ast_generics) => { if ast_generics.is_parameterized() { return; } let def = cx.tcx.lookup_adt_def(cx.tcx.map.local_def_id(item.id)); - (def, cx.tcx.mk_enum(def, - cx.tcx.mk_substs(Substs::empty()))) + (def, cx.tcx.mk_adt(def, Substs::empty(cx.tcx))) } _ => return, }; @@ -525,7 +533,7 @@ impl LateLintPass for MissingDebugImplementations { } match item.node { - hir::ItemStruct(..) | hir::ItemEnum(..) => {}, + hir::ItemStruct(..) | hir::ItemUnion(..) | hir::ItemEnum(..) => {}, _ => return, } @@ -898,7 +906,7 @@ impl LateLintPass for UnconditionalRecursion { // A trait method, from any number of possible sources. // Attempt to select a concrete impl before checking. ty::TraitContainer(trait_def_id) => { - let trait_ref = callee_substs.to_trait_ref(tcx, trait_def_id); + let trait_ref = ty::TraitRef::from_method(tcx, trait_def_id, callee_substs); let trait_ref = ty::Binder(trait_ref); let span = tcx.map.span(expr_id); let obligation = @@ -918,8 +926,7 @@ impl LateLintPass for UnconditionalRecursion { // If `T` is `Self`, then this call is inside // a default method definition. Ok(Some(traits::VtableParam(_))) => { - let self_ty = callee_substs.self_ty(); - let on_self = self_ty.map_or(false, |t| t.is_self()); + let on_self = trait_ref.self_ty().is_self(); // We can only be recurring in a default // method if we're being called literally // on the `Self` type. 
@@ -1030,7 +1037,7 @@ impl LintPass for InvalidNoMangleItems { impl LateLintPass for InvalidNoMangleItems { fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { match it.node { - hir::ItemFn(_, _, _, _, ref generics, _) => { + hir::ItemFn(.., ref generics, _) => { if attr::contains_name(&it.attrs, "no_mangle") { if !cx.access_levels.is_reachable(it.id) { let msg = format!("function {} is marked #[no_mangle], but not exported", @@ -1109,7 +1116,7 @@ impl LateLintPass for MutableTransmutes { } let typ = cx.tcx.node_id_to_type(expr.id); match typ.sty { - ty::TyFnDef(_, _, ref bare_fn) if bare_fn.abi == RustIntrinsic => { + ty::TyFnDef(.., ref bare_fn) if bare_fn.abi == RustIntrinsic => { let from = bare_fn.sig.0.inputs[0]; let to = bare_fn.sig.0.output; return Some((&from.sty, &to.sty)); @@ -1122,7 +1129,7 @@ impl LateLintPass for MutableTransmutes { fn def_id_is_transmute(cx: &LateContext, def_id: DefId) -> bool { match cx.tcx.lookup_item_type(def_id).ty.sty { - ty::TyFnDef(_, _, ref bfty) if bfty.abi == RustIntrinsic => (), + ty::TyFnDef(.., ref bfty) if bfty.abi == RustIntrinsic => (), _ => return false } cx.tcx.item_name(def_id).as_str() == "transmute" @@ -1148,7 +1155,7 @@ impl LintPass for UnstableFeatures { impl LateLintPass for UnstableFeatures { fn check_attribute(&mut self, ctx: &LateContext, attr: &ast::Attribute) { - if attr::contains_name(&[attr.meta().clone()], "feature") { + if attr.meta().check_name("feature") { if let Some(items) = attr.meta().meta_item_list() { for item in items { ctx.span_lint(UNSTABLE_FEATURES, item.span(), "unstable feature"); @@ -1158,55 +1165,35 @@ impl LateLintPass for UnstableFeatures { } } -/// Lints for attempts to impl Drop on types that have `#[repr(C)]` -/// attribute (see issue #24585). -#[derive(Copy, Clone)] -pub struct DropWithReprExtern; +/// Lint for unions that contain fields with possibly non-trivial destructors. +pub struct UnionsWithDropFields; declare_lint! 
{ - DROP_WITH_REPR_EXTERN, + UNIONS_WITH_DROP_FIELDS, Warn, - "use of #[repr(C)] on a type that implements Drop" + "use of unions that contain fields with possibly non-trivial drop code" } -impl LintPass for DropWithReprExtern { +impl LintPass for UnionsWithDropFields { fn get_lints(&self) -> LintArray { - lint_array!(DROP_WITH_REPR_EXTERN) + lint_array!(UNIONS_WITH_DROP_FIELDS) } } -impl LateLintPass for DropWithReprExtern { - fn check_crate(&mut self, ctx: &LateContext, _: &hir::Crate) { - let drop_trait = match ctx.tcx.lang_items.drop_trait() { - Some(id) => ctx.tcx.lookup_trait_def(id), None => { return } - }; - drop_trait.for_each_impl(ctx.tcx, |drop_impl_did| { - if !drop_impl_did.is_local() { - return; - } - let dtor_self_type = ctx.tcx.lookup_item_type(drop_impl_did).ty; - - match dtor_self_type.sty { - ty::TyEnum(self_type_def, _) | - ty::TyStruct(self_type_def, _) => { - let self_type_did = self_type_def.did; - let hints = ctx.tcx.lookup_repr_hints(self_type_did); - if hints.iter().any(|attr| *attr == attr::ReprExtern) && - self_type_def.dtor_kind().has_drop_flag() { - let drop_impl_span = ctx.tcx.map.def_id_span(drop_impl_did, - syntax_pos::DUMMY_SP); - let self_defn_span = ctx.tcx.map.def_id_span(self_type_did, - syntax_pos::DUMMY_SP); - ctx.span_lint_note(DROP_WITH_REPR_EXTERN, - drop_impl_span, - "implementing Drop adds hidden state to types, \ - possibly conflicting with `#[repr(C)]`", - self_defn_span, - "the `#[repr(C)]` attribute is attached here"); - } +impl LateLintPass for UnionsWithDropFields { + fn check_item(&mut self, ctx: &LateContext, item: &hir::Item) { + if let hir::ItemUnion(ref vdata, _) = item.node { + let param_env = &ty::ParameterEnvironment::for_item(ctx.tcx, item.id); + for field in vdata.fields() { + let field_ty = ctx.tcx.node_id_to_type(field.id); + if ctx.tcx.type_needs_drop_given_env(field_ty, param_env) { + ctx.span_lint(UNIONS_WITH_DROP_FIELDS, + field.span, + "union contains a field with possibly non-trivial drop code, \ + drop code of union fields is ignored when dropping the union"); + return; } - _ => {} } - }) + } } } diff --git a/src/librustc_lint/lib.rs b/src/librustc_lint/lib.rs index cb0036eb5b..bc2979c806 100644 --- a/src/librustc_lint/lib.rs +++ b/src/librustc_lint/lib.rs @@ -31,6 +31,7 @@ #![cfg_attr(test, feature(test))] #![feature(box_patterns)] #![feature(box_syntax)] +#![feature(dotdot_in_tuple_patterns)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] @@ -127,8 +128,8 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { UnconditionalRecursion, InvalidNoMangleItems, PluginAsLibrary, - DropWithReprExtern, MutableTransmutes, + UnionsWithDropFields, ); add_builtin_with_new!(sess, @@ -192,14 +193,6 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { id: LintId::of(ILLEGAL_STRUCT_OR_ENUM_CONSTANT_PATTERN), reference: "RFC 1445 ", }, - FutureIncompatibleInfo { - id: LintId::of(UNSIZED_IN_TUPLE), - reference: "issue #33242 ", - }, - FutureIncompatibleInfo { - id: LintId::of(OBJECT_UNSAFE_FRAGMENT), - reference: "issue #33243 ", - }, FutureIncompatibleInfo { id: LintId::of(HR_LIFETIME_IN_ASSOC_TYPE), reference: "issue #33685 ", @@ -208,6 +201,10 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { id: LintId::of(LIFETIME_UNDERSCORE), reference: "RFC 1177 ", }, + FutureIncompatibleInfo { + id: LintId::of(SAFE_EXTERN_STATICS), + reference: "issue 36247 ", + }, ]); // Register renamed and removed lints @@ -218,4 +215,5 
@@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { // This was renamed to raw_pointer_derive, which was then removed, // so it is also considered removed store.register_removed("raw_pointer_deriving", "using derive with raw pointers is ok"); + store.register_removed("drop_with_repr_extern", "drop flags have been removed"); } diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index 99df5c6e5f..1209ced8dd 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -12,7 +12,7 @@ use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; -use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::{self, AdtKind, Ty, TyCtxt}; use rustc::ty::layout::{Layout, Primitive}; use rustc::traits::Reveal; use middle::const_val::ConstVal; @@ -56,7 +56,6 @@ let Wrapping(x) = x; let y: usize = 1.wrapping_neg(); assert_eq!(x, y); ``` - "## } @@ -93,7 +92,7 @@ pub struct TypeLimits { impl TypeLimits { pub fn new() -> TypeLimits { TypeLimits { - negated_expr_id: !0, + negated_expr_id: ast::DUMMY_NODE_ID, } } } @@ -378,7 +377,8 @@ enum FfiResult { FfiSafe, FfiUnsafe(&'static str), FfiBadStruct(DefId, &'static str), - FfiBadEnum(DefId, &'static str) + FfiBadUnion(DefId, &'static str), + FfiBadEnum(DefId, &'static str), } /// Check if this enum can be safely exported based on the @@ -431,90 +431,112 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { } match ty.sty { - ty::TyStruct(def, substs) => { - if !cx.lookup_repr_hints(def.did).contains(&attr::ReprExtern) { - return FfiUnsafe( - "found struct without foreign-function-safe \ - representation annotation in foreign module, \ - consider adding a #[repr(C)] attribute to \ - the type"); - } + ty::TyAdt(def, substs) => match def.adt_kind() { + AdtKind::Struct => { + if !cx.lookup_repr_hints(def.did).contains(&attr::ReprExtern) { + return FfiUnsafe( + "found struct without foreign-function-safe \ + representation annotation in foreign module, \ + consider adding a #[repr(C)] attribute to \ + the type"); + } - // We can't completely trust repr(C) markings; make sure the - // fields are actually safe. - if def.struct_variant().fields.is_empty() { - return FfiUnsafe( - "found zero-size struct in foreign module, consider \ - adding a member to this struct"); - } + // We can't completely trust repr(C) markings; make sure the + // fields are actually safe. + if def.struct_variant().fields.is_empty() { + return FfiUnsafe( + "found zero-size struct in foreign module, consider \ + adding a member to this struct"); + } - for field in &def.struct_variant().fields { - let field_ty = cx.normalize_associated_type(&field.ty(cx, substs)); - let r = self.check_type_for_ffi(cache, field_ty); - match r { - FfiSafe => {} - FfiBadStruct(..) | FfiBadEnum(..) => { return r; } - FfiUnsafe(s) => { return FfiBadStruct(def.did, s); } + for field in &def.struct_variant().fields { + let field_ty = cx.normalize_associated_type(&field.ty(cx, substs)); + let r = self.check_type_for_ffi(cache, field_ty); + match r { + FfiSafe => {} + FfiBadStruct(..) | FfiBadUnion(..) | FfiBadEnum(..) => { return r; } + FfiUnsafe(s) => { return FfiBadStruct(def.did, s); } + } } + FfiSafe } - FfiSafe - } - ty::TyEnum(def, substs) => { - if def.variants.is_empty() { - // Empty enums are okay... although sort of useless. 
- return FfiSafe - } + AdtKind::Union => { + if !cx.lookup_repr_hints(def.did).contains(&attr::ReprExtern) { + return FfiUnsafe( + "found union without foreign-function-safe \ + representation annotation in foreign module, \ + consider adding a #[repr(C)] attribute to \ + the type"); + } - // Check for a repr() attribute to specify the size of the - // discriminant. - let repr_hints = cx.lookup_repr_hints(def.did); - match &repr_hints[..] { - &[] => { - // Special-case types like `Option`. - if !is_repr_nullable_ptr(cx, def, substs) { - return FfiUnsafe( - "found enum without foreign-function-safe \ - representation annotation in foreign module, \ - consider adding a #[repr(...)] attribute to \ - the type") + for field in &def.struct_variant().fields { + let field_ty = cx.normalize_associated_type(&field.ty(cx, substs)); + let r = self.check_type_for_ffi(cache, field_ty); + match r { + FfiSafe => {} + FfiBadStruct(..) | FfiBadUnion(..) | FfiBadEnum(..) => { return r; } + FfiUnsafe(s) => { return FfiBadUnion(def.did, s); } } } - &[ref hint] => { - if !hint.is_ffi_safe() { + FfiSafe + } + AdtKind::Enum => { + if def.variants.is_empty() { + // Empty enums are okay... although sort of useless. + return FfiSafe + } + + // Check for a repr() attribute to specify the size of the + // discriminant. + let repr_hints = cx.lookup_repr_hints(def.did); + match &repr_hints[..] { + &[] => { + // Special-case types like `Option`. + if !is_repr_nullable_ptr(cx, def, substs) { + return FfiUnsafe( + "found enum without foreign-function-safe \ + representation annotation in foreign module, \ + consider adding a #[repr(...)] attribute to \ + the type") + } + } + &[ref hint] => { + if !hint.is_ffi_safe() { + // FIXME: This shouldn't be reachable: we should check + // this earlier. + return FfiUnsafe( + "enum has unexpected #[repr(...)] attribute") + } + + // Enum with an explicitly sized discriminant; either + // a C-style enum or a discriminated union. + + // The layout of enum variants is implicitly repr(C). + // FIXME: Is that correct? + } + _ => { // FIXME: This shouldn't be reachable: we should check // this earlier. return FfiUnsafe( - "enum has unexpected #[repr(...)] attribute") + "enum has too many #[repr(...)] attributes"); } - - // Enum with an explicitly sized discriminant; either - // a C-style enum or a discriminated union. - - // The layout of enum variants is implicitly repr(C). - // FIXME: Is that correct? } - _ => { - // FIXME: This shouldn't be reachable: we should check - // this earlier. - return FfiUnsafe( - "enum has too many #[repr(...)] attributes"); - } - } - // Check the contained variants. - for variant in &def.variants { - for field in &variant.fields { - let arg = cx.normalize_associated_type(&field.ty(cx, substs)); - let r = self.check_type_for_ffi(cache, arg); - match r { - FfiSafe => {} - FfiBadStruct(..) | FfiBadEnum(..) => { return r; } - FfiUnsafe(s) => { return FfiBadEnum(def.did, s); } + // Check the contained variants. + for variant in &def.variants { + for field in &variant.fields { + let arg = cx.normalize_associated_type(&field.ty(cx, substs)); + let r = self.check_type_for_ffi(cache, arg); + match r { + FfiSafe => {} + FfiBadStruct(..) | FfiBadUnion(..) | FfiBadEnum(..) 
=> { return r; } + FfiUnsafe(s) => { return FfiBadEnum(def.did, s); } + } } } + FfiSafe } - FfiSafe - } + }, ty::TyChar => { FfiUnsafe("found Rust type `char` in foreign module, while \ @@ -547,7 +569,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { ty::TyTuple(_) => { FfiUnsafe("found Rust tuple type in foreign module; \ - consider using a struct instead`") + consider using a struct instead") } ty::TyRawPtr(ref m) | ty::TyRef(_, ref m) => { @@ -615,6 +637,13 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { &format!("found non-foreign-function-safe member in \ struct marked #[repr(C)]: {}", s)); } + FfiResult::FfiBadUnion(_, s) => { + // FIXME: This diagnostic is difficult to read, and doesn't + // point at the relevant field. + self.cx.span_lint(IMPROPER_CTYPES, sp, + &format!("found non-foreign-function-safe member in \ + union marked #[repr(C)]: {}", s)); + } FfiResult::FfiBadEnum(_, s) => { // FIXME: This diagnostic is difficult to read, and doesn't // point at the relevant variant. @@ -692,7 +721,7 @@ impl LateLintPass for VariantSizeDifferences { if let hir::ItemEnum(ref enum_definition, ref gens) = it.node { if gens.ty_params.is_empty() { // sizes only make sense for non-generic types let t = cx.tcx.node_id_to_type(it.id); - let layout = cx.tcx.normalizing_infer_ctxt(Reveal::All).enter(|infcx| { + let layout = cx.tcx.infer_ctxt(None, None, Reveal::All).enter(|infcx| { let ty = cx.tcx.erase_regions(&t); ty.layout(&infcx).unwrap_or_else(|e| { bug!("failed to get layout for `{}`: {}", t, e) diff --git a/src/librustc_lint/unused.rs b/src/librustc_lint/unused.rs index 57705301aa..d31f16df69 100644 --- a/src/librustc_lint/unused.rs +++ b/src/librustc_lint/unused.rs @@ -18,8 +18,9 @@ use lint::{LintPass, EarlyLintPass, LateLintPass}; use std::collections::hash_map::Entry::{Occupied, Vacant}; use syntax::ast; -use syntax::attr::{self, AttrMetaMethods}; +use syntax::attr; use syntax::feature_gate::{KNOWN_ATTRIBUTES, AttributeType}; +use syntax::parse::token::keywords; use syntax::ptr::P; use syntax_pos::Span; @@ -135,8 +136,7 @@ impl LateLintPass for UnusedResults { ty::TyTuple(ref tys) if tys.is_empty() => return, ty::TyNever => return, ty::TyBool => return, - ty::TyStruct(def, _) | - ty::TyEnum(def, _) => { + ty::TyAdt(def, _) => { let attrs = cx.tcx.get_attrs(def.did); check_must_use(cx, &attrs[..], s.span) } @@ -234,10 +234,13 @@ impl LintPass for UnusedAttributes { impl LateLintPass for UnusedAttributes { fn check_attribute(&mut self, cx: &LateContext, attr: &ast::Attribute) { + debug!("checking attribute: {:?}", attr); + // Note that check_name() marks the attribute as used if it matches. for &(ref name, ty, _) in KNOWN_ATTRIBUTES { match ty { AttributeType::Whitelisted if attr.check_name(name) => { + debug!("{:?} is Whitelisted", name); break; }, _ => () @@ -247,11 +250,13 @@ impl LateLintPass for UnusedAttributes { let plugin_attributes = cx.sess().plugin_attributes.borrow_mut(); for &(ref name, ty) in plugin_attributes.iter() { if ty == AttributeType::Whitelisted && attr.check_name(&name) { + debug!("{:?} (plugin attr) is whitelisted with ty {:?}", name, ty); break; } } if !attr::is_used(attr) { + debug!("Emitting warning for: {:?}", attr); cx.span_lint(UNUSED_ATTRIBUTES, attr.span, "unused attribute"); // Is it a builtin attribute that must be used at the crate level? 
let known_crate = KNOWN_ATTRIBUTES.iter().find(|&&(name, ty, _)| { @@ -275,6 +280,8 @@ impl LateLintPass for UnusedAttributes { }; cx.span_lint(UNUSED_ATTRIBUTES, attr.span, msg); } + } else { + debug!("Attr was used: {:?}", attr); } } } @@ -325,7 +332,7 @@ impl UnusedParens { contains_exterior_struct_lit(&x) } - ast::ExprKind::MethodCall(_, _, ref exprs) => { + ast::ExprKind::MethodCall(.., ref exprs) => { // X { y: 1 }.bar(...) contains_exterior_struct_lit(&exprs[0]) } @@ -346,15 +353,15 @@ impl EarlyLintPass for UnusedParens { fn check_expr(&mut self, cx: &EarlyContext, e: &ast::Expr) { use syntax::ast::ExprKind::*; let (value, msg, struct_lit_needs_parens) = match e.node { - If(ref cond, _, _) => (cond, "`if` condition", true), - While(ref cond, _, _) => (cond, "`while` condition", true), - IfLet(_, ref cond, _, _) => (cond, "`if let` head expression", true), - WhileLet(_, ref cond, _, _) => (cond, "`while let` head expression", true), - ForLoop(_, ref cond, _, _) => (cond, "`for` head expression", true), + If(ref cond, ..) => (cond, "`if` condition", true), + While(ref cond, ..) => (cond, "`while` condition", true), + IfLet(_, ref cond, ..) => (cond, "`if let` head expression", true), + WhileLet(_, ref cond, ..) => (cond, "`while let` head expression", true), + ForLoop(_, ref cond, ..) => (cond, "`for` head expression", true), Match(ref head, _) => (head, "`match` head expression", true), Ret(Some(ref value)) => (value, "`return` value", false), Assign(_, ref value) => (value, "assigned value", false), - AssignOp(_, _, ref value) => (value, "assigned value", false), + AssignOp(.., ref value) => (value, "assigned value", false), InPlace(_, ref value) => (value, "emplacement value", false), _ => return }; @@ -392,13 +399,9 @@ impl LateLintPass for UnusedImportBraces { fn check_item(&mut self, cx: &LateContext, item: &hir::Item) { if let hir::ItemUse(ref view_path) = item.node { if let hir::ViewPathList(_, ref items) = view_path.node { - if items.len() == 1 { - if let hir::PathListIdent {ref name, ..} = items[0].node { - let m = format!("braces around {} is unnecessary", - name); - cx.span_lint(UNUSED_IMPORT_BRACES, item.span, - &m[..]); - } + if items.len() == 1 && items[0].node.name != keywords::SelfValue.name() { + let msg = format!("braces around {} is unnecessary", items[0].node.name); + cx.span_lint(UNUSED_IMPORT_BRACES, item.span, &msg); } } } diff --git a/src/librustc_llvm/build.rs b/src/librustc_llvm/build.rs index 5f7a0f788c..3f551476e2 100644 --- a/src/librustc_llvm/build.rs +++ b/src/librustc_llvm/build.rs @@ -20,7 +20,7 @@ use build_helper::output; fn main() { println!("cargo:rustc-cfg=cargobuild"); - let target = env::var("TARGET").unwrap(); + let target = env::var("TARGET").expect("TARGET was not set"); let llvm_config = env::var_os("LLVM_CONFIG") .map(PathBuf::from) .unwrap_or_else(|| { @@ -62,11 +62,11 @@ fn main() { // can't trust all the output of llvm-config becaues it might be targeted // for the host rather than the target. As a result a bunch of blocks below // are gated on `if !is_crossed` - let target = env::var("TARGET").unwrap(); - let host = env::var("HOST").unwrap(); + let target = env::var("TARGET").expect("TARGET was not set"); + let host = env::var("HOST").expect("HOST was not set"); let is_crossed = target != host; - let optional_components = ["x86", "arm", "aarch64", "mips", "powerpc", "pnacl"]; + let optional_components = ["x86", "arm", "aarch64", "mips", "powerpc", "pnacl", "systemz"]; // FIXME: surely we don't need all these components, right? 
Stuff like mcjit // or interpreter the compiler itself never uses. diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index b2ffcac365..50c68d5e75 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -42,16 +42,12 @@ pub enum CallConv { ColdCallConv = 9, X86StdcallCallConv = 64, X86FastcallCallConv = 65, + X86_64_SysV = 78, X86_64_Win64 = 79, X86_VectorCall = 80 } -/// LLVMLinkage -/// -/// This enum omits the obsolete (and no-op) linkage types DLLImportLinkage, -/// DLLExportLinkage, GhostLinkage and LinkOnceODRAutoHideLinkage. -/// LinkerPrivateLinkage and LinkerPrivateWeakLinkage are not included either; -/// they've been removed in upstream LLVM commit r203866. +/// LLVMRustLinkage #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] #[repr(C)] pub enum Linkage { @@ -59,13 +55,13 @@ pub enum Linkage { AvailableExternallyLinkage = 1, LinkOnceAnyLinkage = 2, LinkOnceODRLinkage = 3, - WeakAnyLinkage = 5, - WeakODRLinkage = 6, - AppendingLinkage = 7, - InternalLinkage = 8, - PrivateLinkage = 9, - ExternalWeakLinkage = 12, - CommonLinkage = 14, + WeakAnyLinkage = 4, + WeakODRLinkage = 5, + AppendingLinkage = 6, + InternalLinkage = 7, + PrivateLinkage = 8, + ExternalWeakLinkage = 9, + CommonLinkage = 10, } /// LLVMDiagnosticSeverity @@ -252,8 +248,7 @@ pub enum FileType { ObjectFile, } -/// Enum pinned in LLVMContext, used in -/// LLVMSetMetadata so ABI-stable. +/// LLVMMetadataType #[derive(Copy, Clone)] #[repr(C)] pub enum MetadataType { @@ -820,8 +815,8 @@ extern { /* Operations on global variables, functions, and aliases (globals) */ pub fn LLVMGetGlobalParent(Global: ValueRef) -> ModuleRef; pub fn LLVMIsDeclaration(Global: ValueRef) -> Bool; - pub fn LLVMGetLinkage(Global: ValueRef) -> c_uint; - pub fn LLVMSetLinkage(Global: ValueRef, Link: Linkage); + pub fn LLVMRustGetLinkage(Global: ValueRef) -> Linkage; + pub fn LLVMRustSetLinkage(Global: ValueRef, RustLinkage: Linkage); pub fn LLVMGetSection(Global: ValueRef) -> *const c_char; pub fn LLVMSetSection(Global: ValueRef, Section: *const c_char); pub fn LLVMGetVisibility(Global: ValueRef) -> c_uint; @@ -1796,6 +1791,11 @@ extern { Col: c_uint) -> DILexicalBlock; + pub fn LLVMRustDIBuilderCreateLexicalBlockFile(Builder: DIBuilderRef, + Scope: DIScope, + File: DIFile) + -> DILexicalBlock; + pub fn LLVMRustDIBuilderCreateStaticVariable(Builder: DIBuilderRef, Context: DIScope, Name: *const c_char, diff --git a/src/librustc_llvm/lib.rs b/src/librustc_llvm/lib.rs index 6c4e1a54ea..eb45d3d25c 100644 --- a/src/librustc_llvm/lib.rs +++ b/src/librustc_llvm/lib.rs @@ -428,6 +428,12 @@ pub fn initialize_available_targets() { LLVMInitializePNaClTargetInfo, LLVMInitializePNaClTarget, LLVMInitializePNaClTargetMC); + init_target!(llvm_component = "systemz", + LLVMInitializeSystemZTargetInfo, + LLVMInitializeSystemZTarget, + LLVMInitializeSystemZTargetMC, + LLVMInitializeSystemZAsmPrinter, + LLVMInitializeSystemZAsmParser); } pub fn last_error() -> Option { diff --git a/src/librbml/Cargo.toml b/src/librustc_macro/Cargo.toml similarity index 57% rename from src/librbml/Cargo.toml rename to src/librustc_macro/Cargo.toml index ab89ac2b7a..6b3ee21d9a 100644 --- a/src/librbml/Cargo.toml +++ b/src/librustc_macro/Cargo.toml @@ -1,13 +1,12 @@ [package] authors = ["The Rust Project Developers"] -name = "rbml" +name = "rustc_macro" version = "0.0.0" [lib] -name = "rbml" +name = "rustc_macro" path = "lib.rs" crate-type = ["dylib"] [dependencies] -log = { path = "../liblog" } -serialize = { path = "../libserialize" } +syntax = { path = 
"../libsyntax" } diff --git a/src/librustc_macro/lib.rs b/src/librustc_macro/lib.rs new file mode 100644 index 0000000000..c2a2cc2ecd --- /dev/null +++ b/src/librustc_macro/lib.rs @@ -0,0 +1,169 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A support library for macro authors when defining new macros. +//! +//! This library, provided by the standard distribution, provides the types +//! consumed in the interfaces of procedurally defined macro definitions. +//! Currently the primary use of this crate is to provide the ability to define +//! new custom derive modes through `#[rustc_macro_derive]`. +//! +//! Added recently as part of [RFC 1681] this crate is currently *unstable* and +//! requires the `#![feature(rustc_macro_lib)]` directive to use. Eventually, +//! though, it is intended for this crate to become stable to use (perhaps under +//! a different name). +//! +//! [RFC 1681]: https://github.com/rust-lang/rfcs/blob/master/text/1681-macros-1.1.md +//! +//! Note that this crate is intentionally very bare-bones currently. The main +//! type, `TokenStream`, only supports `fmt::Display` and `FromStr` +//! implementations, indicating that it can only go to and come from a string. +//! This functionality is intended to be expanded over time as more surface +//! area for macro authors is stabilized. + +#![crate_name = "rustc_macro"] +#![unstable(feature = "rustc_macro_lib", issue = "27812")] +#![crate_type = "rlib"] +#![crate_type = "dylib"] +#![cfg_attr(not(stage0), deny(warnings))] +#![deny(missing_docs)] + +#![feature(rustc_private)] +#![feature(staged_api)] +#![feature(lang_items)] + +extern crate syntax; + +use std::fmt; +use std::str::FromStr; + +use syntax::ast; +use syntax::parse; +use syntax::ptr::P; + +/// The main type provided by this crate, representing an abstract stream of +/// tokens. +/// +/// This is both the input and output of `#[rustc_macro_derive]` definitions. +/// Currently it's required to be a list of valid Rust items, but this +/// restriction may be lifted in the future. +/// +/// The API of this type is intentionally bare-bones, but it'll be expanded over +/// time! +pub struct TokenStream { + inner: Vec>, +} + +/// Error returned from `TokenStream::from_str`. +#[derive(Debug)] +pub struct LexError { + _inner: (), +} + +/// Permanently unstable internal implementation details of this crate. This +/// should not be used. +/// +/// These methods are used by the rest of the compiler to generate instances of +/// `TokenStream` to hand to macro definitions, as well as consume the output. +/// +/// Note that this module is also intentionally separate from the rest of the +/// crate. This allows the `#[unstable]` directive below to naturally apply to +/// all of the contents. 
+#[unstable(feature = "rustc_macro_internals", issue = "27812")] +#[doc(hidden)] +pub mod __internal { + use std::cell::Cell; + + use syntax::ast; + use syntax::ptr::P; + use syntax::parse::ParseSess; + use super::TokenStream; + + pub fn new_token_stream(item: P) -> TokenStream { + TokenStream { inner: vec![item] } + } + + pub fn token_stream_items(stream: TokenStream) -> Vec> { + stream.inner + } + + pub trait Registry { + fn register_custom_derive(&mut self, + trait_name: &str, + expand: fn(TokenStream) -> TokenStream); + } + + // Emulate scoped_thread_local!() here essentially + thread_local! { + static CURRENT_SESS: Cell<*const ParseSess> = Cell::new(0 as *const _); + } + + pub fn set_parse_sess(sess: &ParseSess, f: F) -> R + where F: FnOnce() -> R + { + struct Reset { prev: *const ParseSess } + + impl Drop for Reset { + fn drop(&mut self) { + CURRENT_SESS.with(|p| p.set(self.prev)); + } + } + + CURRENT_SESS.with(|p| { + let _reset = Reset { prev: p.get() }; + p.set(sess); + f() + }) + } + + pub fn with_parse_sess(f: F) -> R + where F: FnOnce(&ParseSess) -> R + { + let p = CURRENT_SESS.with(|p| p.get()); + assert!(!p.is_null()); + f(unsafe { &*p }) + } +} + +impl FromStr for TokenStream { + type Err = LexError; + + fn from_str(src: &str) -> Result { + __internal::with_parse_sess(|sess| { + let src = src.to_string(); + let cfg = Vec::new(); + let name = "rustc-macro source code".to_string(); + let mut parser = parse::new_parser_from_source_str(sess, cfg, name, + src); + let mut ret = TokenStream { inner: Vec::new() }; + loop { + match parser.parse_item() { + Ok(Some(item)) => ret.inner.push(item), + Ok(None) => return Ok(ret), + Err(mut err) => { + err.cancel(); + return Err(LexError { _inner: () }) + } + } + } + }) + } +} + +impl fmt::Display for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for item in self.inner.iter() { + let item = syntax::print::pprust::item_to_string(item); + try!(f.write_str(&item)); + try!(f.write_str("\n")); + } + Ok(()) + } +} diff --git a/src/librustc_metadata/Cargo.toml b/src/librustc_metadata/Cargo.toml index 2d3302c2ee..680d55955b 100644 --- a/src/librustc_metadata/Cargo.toml +++ b/src/librustc_metadata/Cargo.toml @@ -11,14 +11,14 @@ crate-type = ["dylib"] [dependencies] flate = { path = "../libflate" } log = { path = "../liblog" } -rbml = { path = "../librbml" } rustc = { path = "../librustc" } rustc_back = { path = "../librustc_back" } -rustc_bitflags = { path = "../librustc_bitflags" } rustc_const_math = { path = "../librustc_const_math" } rustc_data_structures = { path = "../librustc_data_structures" } rustc_errors = { path = "../librustc_errors" } rustc_llvm = { path = "../librustc_llvm" } +rustc_macro = { path = "../librustc_macro" } serialize = { path = "../libserialize" } syntax = { path = "../libsyntax" } -syntax_pos = { path = "../libsyntax_pos" } \ No newline at end of file +syntax_ext = { path = "../libsyntax_ext" } +syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/librustc_metadata/astencode.rs b/src/librustc_metadata/astencode.rs index b8e66530ea..c9dbedacbc 100644 --- a/src/librustc_metadata/astencode.rs +++ b/src/librustc_metadata/astencode.rs @@ -8,1422 +8,151 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![allow(non_camel_case_types)] -// FIXME: remove this after snapshot, and Results are handled -#![allow(unused_must_use)] - use rustc::hir::map as ast_map; -use rustc::session::Session; -use rustc::hir; -use rustc::hir::fold; -use rustc::hir::fold::Folder; use rustc::hir::intravisit::{Visitor, IdRangeComputingVisitor, IdRange}; -use common as c; -use cstore; -use decoder; -use encoder as e; -use tydecode; -use tyencode; +use cstore::CrateMetadata; +use encoder::EncodeContext; +use schema::*; -use middle::cstore::{InlinedItem, InlinedItemRef}; -use rustc::ty::adjustment; -use rustc::ty::cast; -use middle::const_qualif::ConstQualif; +use rustc::middle::cstore::{InlinedItem, InlinedItemRef}; +use rustc::middle::const_qualif::ConstQualif; use rustc::hir::def::{self, Def}; use rustc::hir::def_id::DefId; -use middle::region; -use rustc::ty::subst; -use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::{self, TyCtxt, Ty}; use syntax::ast; -use syntax::ptr::P; -use syntax_pos; - -use std::cell::Cell; -use std::io::SeekFrom; -use std::io::prelude::*; -use std::fmt::Debug; - -use rbml::reader; -use rbml::writer::Encoder; -use rbml; -use rustc_serialize as serialize; -use rustc_serialize::{Decodable, Decoder, DecoderHelpers}; -use rustc_serialize::{Encodable, EncoderHelpers}; - -#[cfg(test)] use std::io::Cursor; -#[cfg(test)] use syntax::parse; -#[cfg(test)] use rustc::hir::print as pprust; -#[cfg(test)] use rustc::hir::lowering::{LoweringContext, DummyResolver}; - -struct DecodeContext<'a, 'b, 'tcx: 'a> { - tcx: TyCtxt<'a, 'tcx, 'tcx>, - cdata: &'b cstore::CrateMetadata, - from_id_range: IdRange, - to_id_range: IdRange, - // Cache the last used filemap for translating spans as an optimization. - last_filemap_index: Cell, -} - -trait tr { - fn tr(&self, dcx: &DecodeContext) -> Self; -} - -// ______________________________________________________________________ -// Top-level methods. - -pub fn encode_inlined_item(ecx: &e::EncodeContext, - rbml_w: &mut Encoder, - ii: InlinedItemRef) { - let id = match ii { - InlinedItemRef::Item(_, i) => i.id, - InlinedItemRef::Foreign(_, i) => i.id, - InlinedItemRef::TraitItem(_, ti) => ti.id, - InlinedItemRef::ImplItem(_, ii) => ii.id, - }; - debug!("> Encoding inlined item: {} ({:?})", - ecx.tcx.node_path_str(id), - rbml_w.writer.seek(SeekFrom::Current(0))); - - // Folding could be avoided with a smarter encoder. - let (ii, expected_id_range) = simplify_ast(ii); - let id_range = inlined_item_id_range(&ii); - assert_eq!(expected_id_range, id_range); - - rbml_w.start_tag(c::tag_ast as usize); - id_range.encode(rbml_w); - encode_ast(rbml_w, &ii); - encode_side_tables_for_ii(ecx, rbml_w, &ii); - rbml_w.end_tag(); - - debug!("< Encoded inlined fn: {} ({:?})", - ecx.tcx.node_path_str(id), - rbml_w.writer.seek(SeekFrom::Current(0))); -} - -impl<'a, 'b, 'c, 'tcx> ast_map::FoldOps for &'a DecodeContext<'b, 'c, 'tcx> { - fn new_id(&self, id: ast::NodeId) -> ast::NodeId { - if id == ast::DUMMY_NODE_ID { - // Used by ast_map to map the NodeInlinedParent. - self.tcx.sess.next_node_id() - } else { - self.tr_id(id) - } - } - fn new_def_id(&self, def_id: DefId) -> DefId { - self.tr_def_id(def_id) - } - fn new_span(&self, span: syntax_pos::Span) -> syntax_pos::Span { - self.tr_span(span) - } -} - -/// Decodes an item from its AST in the cdata's metadata and adds it to the -/// ast-map. 
-pub fn decode_inlined_item<'a, 'tcx>(cdata: &cstore::CrateMetadata, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - parent_def_path: ast_map::DefPath, - parent_did: DefId, - ast_doc: rbml::Doc, - orig_did: DefId) - -> &'tcx InlinedItem { - debug!("> Decoding inlined fn: {:?}", tcx.item_path_str(orig_did)); - let mut ast_dsr = reader::Decoder::new(ast_doc); - let from_id_range = Decodable::decode(&mut ast_dsr).unwrap(); - let to_id_range = reserve_id_range(&tcx.sess, from_id_range); - let dcx = &DecodeContext { - cdata: cdata, - tcx: tcx, - from_id_range: from_id_range, - to_id_range: to_id_range, - last_filemap_index: Cell::new(0) - }; - let ii = ast_map::map_decoded_item(&dcx.tcx.map, - parent_def_path, - parent_did, - decode_ast(ast_doc), - dcx); - let name = match *ii { - InlinedItem::Item(_, ref i) => i.name, - InlinedItem::Foreign(_, ref i) => i.name, - InlinedItem::TraitItem(_, ref ti) => ti.name, - InlinedItem::ImplItem(_, ref ii) => ii.name - }; - debug!("Fn named: {}", name); - debug!("< Decoded inlined fn: {}::{}", - tcx.item_path_str(parent_did), - name); - region::resolve_inlined_item(&tcx.sess, &tcx.region_maps, ii); - decode_side_tables(dcx, ast_doc); - copy_item_types(dcx, ii, orig_did); - if let InlinedItem::Item(_, ref i) = *ii { - debug!(">>> DECODED ITEM >>>\n{}\n<<< DECODED ITEM <<<", - ::rustc::hir::print::item_to_string(&i)); - } - - ii -} - -// ______________________________________________________________________ -// Enumerating the IDs which appear in an AST - -fn reserve_id_range(sess: &Session, - from_id_range: IdRange) -> IdRange { - // Handle the case of an empty range: - if from_id_range.empty() { return from_id_range; } - let cnt = from_id_range.max - from_id_range.min; - let to_id_min = sess.reserve_node_ids(cnt); - let to_id_max = to_id_min + cnt; - IdRange { min: to_id_min, max: to_id_max } -} - -impl<'a, 'b, 'tcx> DecodeContext<'a, 'b, 'tcx> { - /// Translates an internal id, meaning a node id that is known to refer to some part of the - /// item currently being inlined, such as a local variable or argument. All naked node-ids - /// that appear in types have this property, since if something might refer to an external item - /// we would use a def-id to allow for the possibility that the item resides in another crate. - pub fn tr_id(&self, id: ast::NodeId) -> ast::NodeId { - // from_id_range should be non-empty - assert!(!self.from_id_range.empty()); - // Make sure that translating the NodeId will actually yield a - // meaningful result - assert!(self.from_id_range.contains(id)); - - // Use wrapping arithmetic because otherwise it introduces control flow. - // Maybe we should just have the control flow? -- aatch - (id.wrapping_sub(self.from_id_range.min).wrapping_add(self.to_id_range.min)) - } - - /// Translates an EXTERNAL def-id, converting the crate number from the one used in the encoded - /// data to the current crate numbers.. By external, I mean that it be translated to a - /// reference to the item in its original crate, as opposed to being translated to a reference - /// to the inlined version of the item. This is typically, but not always, what you want, - /// because most def-ids refer to external things like types or other fns that may or may not - /// be inlined. Note that even when the inlined function is referencing itself recursively, we - /// would want `tr_def_id` for that reference--- conceptually the function calls the original, - /// non-inlined version, and trans deals with linking that recursive call to the inlined copy. 
- pub fn tr_def_id(&self, did: DefId) -> DefId { - decoder::translate_def_id(self.cdata, did) - } - - /// Translates a `Span` from an extern crate to the corresponding `Span` - /// within the local crate's codemap. - pub fn tr_span(&self, span: syntax_pos::Span) -> syntax_pos::Span { - decoder::translate_span(self.cdata, - self.tcx.sess.codemap(), - &self.last_filemap_index, - span) - } -} - -impl tr for DefId { - fn tr(&self, dcx: &DecodeContext) -> DefId { - dcx.tr_def_id(*self) - } -} - -impl tr for Option { - fn tr(&self, dcx: &DecodeContext) -> Option { - self.map(|d| dcx.tr_def_id(d)) - } -} - -impl tr for syntax_pos::Span { - fn tr(&self, dcx: &DecodeContext) -> syntax_pos::Span { - dcx.tr_span(*self) - } -} - -trait def_id_encoder_helpers { - fn emit_def_id(&mut self, did: DefId); -} - -impl def_id_encoder_helpers for S - where ::Error: Debug -{ - fn emit_def_id(&mut self, did: DefId) { - did.encode(self).unwrap() - } -} - -trait def_id_decoder_helpers { - fn read_def_id(&mut self, dcx: &DecodeContext) -> DefId; - fn read_def_id_nodcx(&mut self, - cdata: &cstore::CrateMetadata) -> DefId; -} - -impl def_id_decoder_helpers for D - where ::Error: Debug -{ - fn read_def_id(&mut self, dcx: &DecodeContext) -> DefId { - let did: DefId = Decodable::decode(self).unwrap(); - did.tr(dcx) - } - - fn read_def_id_nodcx(&mut self, - cdata: &cstore::CrateMetadata) - -> DefId { - let did: DefId = Decodable::decode(self).unwrap(); - decoder::translate_def_id(cdata, did) - } -} - -// ______________________________________________________________________ -// Encoding and decoding the AST itself -// -// When decoding, we have to renumber the AST so that the node ids that -// appear within are disjoint from the node ids in our existing ASTs. -// We also have to adjust the spans: for now we just insert a dummy span, -// but eventually we should add entries to the local codemap as required. - -fn encode_ast(rbml_w: &mut Encoder, item: &InlinedItem) { - rbml_w.start_tag(c::tag_tree as usize); - rbml_w.emit_opaque(|this| item.encode(this)); - rbml_w.end_tag(); -} - -struct NestedItemsDropper { - id_range: IdRange -} - -impl Folder for NestedItemsDropper { - // The unit tests below run on HIR with NodeIds not properly assigned. That - // causes an integer overflow. So we just don't track the id_range when - // building the unit tests. - #[cfg(not(test))] - fn new_id(&mut self, id: ast::NodeId) -> ast::NodeId { - // Record the range of NodeIds we are visiting, so we can do a sanity - // check later - self.id_range.add(id); - id - } - - fn fold_block(&mut self, blk: P) -> P { - blk.and_then(|hir::Block {id, stmts, expr, rules, span, ..}| { - let stmts_sans_items = stmts.into_iter().filter_map(|stmt| { - let use_stmt = match stmt.node { - hir::StmtExpr(_, _) | hir::StmtSemi(_, _) => true, - hir::StmtDecl(ref decl, _) => { - match decl.node { - hir::DeclLocal(_) => true, - hir::DeclItem(_) => false, - } - } - }; - if use_stmt { - Some(stmt) - } else { - None - } - }).collect(); - let blk_sans_items = P(hir::Block { - stmts: stmts_sans_items, - expr: expr, - id: id, - rules: rules, - span: span, - }); - fold::noop_fold_block(blk_sans_items, self) - }) - } -} - -// Produces a simplified copy of the AST which does not include things -// that we do not need to or do not want to export. 
For example, we -// do not include any nested items: if these nested items are to be -// inlined, their AST will be exported separately (this only makes -// sense because, in Rust, nested items are independent except for -// their visibility). -// -// As it happens, trans relies on the fact that we do not export -// nested items, as otherwise it would get confused when translating -// inlined items. -fn simplify_ast(ii: InlinedItemRef) -> (InlinedItem, IdRange) { - let mut fld = NestedItemsDropper { - id_range: IdRange::max() - }; - - let ii = match ii { - // HACK we're not dropping items. - InlinedItemRef::Item(d, i) => { - InlinedItem::Item(d, P(fold::noop_fold_item(i.clone(), &mut fld))) - } - InlinedItemRef::TraitItem(d, ti) => { - InlinedItem::TraitItem(d, P(fold::noop_fold_trait_item(ti.clone(), &mut fld))) - } - InlinedItemRef::ImplItem(d, ii) => { - InlinedItem::ImplItem(d, P(fold::noop_fold_impl_item(ii.clone(), &mut fld))) - } - InlinedItemRef::Foreign(d, i) => { - InlinedItem::Foreign(d, P(fold::noop_fold_foreign_item(i.clone(), &mut fld))) - } - }; - - (ii, fld.id_range) -} +use rustc_serialize::Encodable; -fn decode_ast(item_doc: rbml::Doc) -> InlinedItem { - let chi_doc = item_doc.get(c::tag_tree as usize); - let mut rbml_r = reader::Decoder::new(chi_doc); - rbml_r.read_opaque(|decoder, _| Decodable::decode(decoder)).unwrap() +#[derive(RustcEncodable, RustcDecodable)] +pub struct Ast<'tcx> { + id_range: IdRange, + item: Lazy, + side_tables: LazySeq<(ast::NodeId, TableEntry<'tcx>)> } -// ______________________________________________________________________ -// Encoding and decoding of ast::def - -fn decode_def(dcx: &DecodeContext, dsr: &mut reader::Decoder) -> Def { - let def: Def = Decodable::decode(dsr).unwrap(); - def.tr(dcx) +#[derive(RustcEncodable, RustcDecodable)] +enum TableEntry<'tcx> { + Def(Def), + NodeType(Ty<'tcx>), + ItemSubsts(ty::ItemSubsts<'tcx>), + Adjustment(ty::adjustment::AutoAdjustment<'tcx>), + ConstQualif(ConstQualif) } -impl tr for Def { - fn tr(&self, dcx: &DecodeContext) -> Def { - match *self { - Def::Fn(did) => Def::Fn(did.tr(dcx)), - Def::Method(did) => Def::Method(did.tr(dcx)), - Def::SelfTy(opt_did, impl_id) => { - // Since the impl_id will never lie within the reserved range of - // imported NodeIds, it does not make sense to translate it. - // The result would not make any sense within the importing crate. - // We also don't allow for impl items to be inlined (just their - // members), so even if we had a DefId here, we wouldn't be able - // to do much with it. - // So, we set the id to DUMMY_NODE_ID. That way we make it - // explicit that this is no usable NodeId. 
- Def::SelfTy(opt_did.map(|did| did.tr(dcx)), - impl_id.map(|_| ast::DUMMY_NODE_ID)) - } - Def::Mod(did) => { Def::Mod(did.tr(dcx)) } - Def::ForeignMod(did) => { Def::ForeignMod(did.tr(dcx)) } - Def::Static(did, m) => { Def::Static(did.tr(dcx), m) } - Def::Const(did) => { Def::Const(did.tr(dcx)) } - Def::AssociatedConst(did) => Def::AssociatedConst(did.tr(dcx)), - Def::Local(_, nid) => { - let nid = dcx.tr_id(nid); - let did = dcx.tcx.map.local_def_id(nid); - Def::Local(did, nid) - } - Def::Variant(e_did, v_did) => Def::Variant(e_did.tr(dcx), v_did.tr(dcx)), - Def::Trait(did) => Def::Trait(did.tr(dcx)), - Def::Enum(did) => Def::Enum(did.tr(dcx)), - Def::TyAlias(did) => Def::TyAlias(did.tr(dcx)), - Def::AssociatedTy(trait_did, did) => - Def::AssociatedTy(trait_did.tr(dcx), did.tr(dcx)), - Def::PrimTy(p) => Def::PrimTy(p), - Def::TyParam(s, index, def_id, n) => Def::TyParam(s, index, def_id.tr(dcx), n), - Def::Upvar(_, nid1, index, nid2) => { - let nid1 = dcx.tr_id(nid1); - let nid2 = dcx.tr_id(nid2); - let did1 = dcx.tcx.map.local_def_id(nid1); - Def::Upvar(did1, nid1, index, nid2) - } - Def::Struct(did) => Def::Struct(did.tr(dcx)), - Def::Label(nid) => Def::Label(dcx.tr_id(nid)), - Def::Err => Def::Err, +impl<'a, 'tcx> EncodeContext<'a, 'tcx> { + pub fn encode_inlined_item(&mut self, ii: InlinedItemRef) -> Lazy> { + let mut id_visitor = IdRangeComputingVisitor::new(); + match ii { + InlinedItemRef::Item(_, i) => id_visitor.visit_item(i), + InlinedItemRef::TraitItem(_, ti) => id_visitor.visit_trait_item(ti), + InlinedItemRef::ImplItem(_, ii) => id_visitor.visit_impl_item(ii) } - } -} - -// ______________________________________________________________________ -// Encoding and decoding of freevar information -fn encode_freevar_entry(rbml_w: &mut Encoder, fv: &hir::Freevar) { - (*fv).encode(rbml_w).unwrap(); -} - -trait rbml_decoder_helper { - fn read_freevar_entry(&mut self, dcx: &DecodeContext) - -> hir::Freevar; - fn read_capture_mode(&mut self) -> hir::CaptureClause; -} - -impl<'a> rbml_decoder_helper for reader::Decoder<'a> { - fn read_freevar_entry(&mut self, dcx: &DecodeContext) - -> hir::Freevar { - let fv: hir::Freevar = Decodable::decode(self).unwrap(); - fv.tr(dcx) - } - - fn read_capture_mode(&mut self) -> hir::CaptureClause { - let cm: hir::CaptureClause = Decodable::decode(self).unwrap(); - cm - } -} - -impl tr for hir::Freevar { - fn tr(&self, dcx: &DecodeContext) -> hir::Freevar { - hir::Freevar { - def: self.def.tr(dcx), - span: self.span.tr(dcx), - } - } -} - -// ______________________________________________________________________ -// Encoding and decoding of MethodCallee - -trait read_method_callee_helper<'tcx> { - fn read_method_callee<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> (u32, ty::MethodCallee<'tcx>); -} - -fn encode_method_callee<'a, 'tcx>(ecx: &e::EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - autoderef: u32, - method: &ty::MethodCallee<'tcx>) { - use rustc_serialize::Encoder; - - rbml_w.emit_struct("MethodCallee", 4, |rbml_w| { - rbml_w.emit_struct_field("autoderef", 0, |rbml_w| { - autoderef.encode(rbml_w) - }); - rbml_w.emit_struct_field("def_id", 1, |rbml_w| { - Ok(rbml_w.emit_def_id(method.def_id)) - }); - rbml_w.emit_struct_field("ty", 2, |rbml_w| { - Ok(rbml_w.emit_ty(ecx, method.ty)) - }); - rbml_w.emit_struct_field("substs", 3, |rbml_w| { - Ok(rbml_w.emit_substs(ecx, &method.substs)) - }) - }).unwrap(); -} - -impl<'a, 'tcx> read_method_callee_helper<'tcx> for reader::Decoder<'a> { - fn read_method_callee<'b, 'c>(&mut self, dcx: 
&DecodeContext<'b, 'c, 'tcx>) - -> (u32, ty::MethodCallee<'tcx>) { - - self.read_struct("MethodCallee", 4, |this| { - let autoderef = this.read_struct_field("autoderef", 0, - Decodable::decode).unwrap(); - Ok((autoderef, ty::MethodCallee { - def_id: this.read_struct_field("def_id", 1, |this| { - Ok(this.read_def_id(dcx)) - }).unwrap(), - ty: this.read_struct_field("ty", 2, |this| { - Ok(this.read_ty(dcx)) - }).unwrap(), - substs: this.read_struct_field("substs", 3, |this| { - Ok(dcx.tcx.mk_substs(this.read_substs(dcx))) - }).unwrap() - })) - }).unwrap() - } -} - -pub fn encode_cast_kind(ebml_w: &mut Encoder, kind: cast::CastKind) { - kind.encode(ebml_w).unwrap(); -} - -// ______________________________________________________________________ -// Encoding and decoding the side tables - -trait rbml_writer_helpers<'tcx> { - fn emit_region(&mut self, ecx: &e::EncodeContext, r: ty::Region); - fn emit_ty<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, ty: Ty<'tcx>); - fn emit_tys<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, tys: &[Ty<'tcx>]); - fn emit_predicate<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, - predicate: &ty::Predicate<'tcx>); - fn emit_trait_ref<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, - ty: &ty::TraitRef<'tcx>); - fn emit_substs<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, - substs: &subst::Substs<'tcx>); - fn emit_existential_bounds<'b>(&mut self, ecx: &e::EncodeContext<'b,'tcx>, - bounds: &ty::ExistentialBounds<'tcx>); - fn emit_builtin_bounds(&mut self, ecx: &e::EncodeContext, bounds: &ty::BuiltinBounds); - fn emit_upvar_capture(&mut self, ecx: &e::EncodeContext, capture: &ty::UpvarCapture); - fn emit_auto_adjustment<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, - adj: &adjustment::AutoAdjustment<'tcx>); - fn emit_autoref<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, - autoref: &adjustment::AutoRef<'tcx>); - fn emit_auto_deref_ref<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, - auto_deref_ref: &adjustment::AutoDerefRef<'tcx>); -} - -impl<'a, 'tcx> rbml_writer_helpers<'tcx> for Encoder<'a> { - fn emit_region(&mut self, ecx: &e::EncodeContext, r: ty::Region) { - self.emit_opaque(|this| Ok(tyencode::enc_region(&mut this.cursor, - &ecx.ty_str_ctxt(), - r))); - } - - fn emit_ty<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, ty: Ty<'tcx>) { - self.emit_opaque(|this| Ok(tyencode::enc_ty(&mut this.cursor, - &ecx.ty_str_ctxt(), - ty))); - } - - fn emit_tys<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, tys: &[Ty<'tcx>]) { - self.emit_from_vec(tys, |this, ty| Ok(this.emit_ty(ecx, *ty))); - } - - fn emit_trait_ref<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, - trait_ref: &ty::TraitRef<'tcx>) { - self.emit_opaque(|this| Ok(tyencode::enc_trait_ref(&mut this.cursor, - &ecx.ty_str_ctxt(), - *trait_ref))); - } - - fn emit_predicate<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, - predicate: &ty::Predicate<'tcx>) { - self.emit_opaque(|this| { - Ok(tyencode::enc_predicate(&mut this.cursor, - &ecx.ty_str_ctxt(), - predicate)) - }); - } - - fn emit_existential_bounds<'b>(&mut self, ecx: &e::EncodeContext<'b,'tcx>, - bounds: &ty::ExistentialBounds<'tcx>) { - self.emit_opaque(|this| Ok(tyencode::enc_existential_bounds(&mut this.cursor, - &ecx.ty_str_ctxt(), - bounds))); - } - - fn emit_builtin_bounds(&mut self, ecx: &e::EncodeContext, bounds: &ty::BuiltinBounds) { - self.emit_opaque(|this| Ok(tyencode::enc_builtin_bounds(&mut this.cursor, - &ecx.ty_str_ctxt(), - bounds))); - } - - fn emit_upvar_capture(&mut self, ecx: &e::EncodeContext, capture: 
&ty::UpvarCapture) { - use rustc_serialize::Encoder; - - self.emit_enum("UpvarCapture", |this| { - match *capture { - ty::UpvarCapture::ByValue => { - this.emit_enum_variant("ByValue", 1, 0, |_| Ok(())) - } - ty::UpvarCapture::ByRef(ty::UpvarBorrow { kind, region }) => { - this.emit_enum_variant("ByRef", 2, 0, |this| { - this.emit_enum_variant_arg(0, - |this| kind.encode(this)); - this.emit_enum_variant_arg(1, - |this| Ok(this.emit_region(ecx, region))) - }) - } + let ii_pos = self.position(); + ii.encode(self).unwrap(); + + let tables_pos = self.position(); + let tables_count = { + let mut visitor = SideTableEncodingIdVisitor { + ecx: self, + count: 0 + }; + match ii { + InlinedItemRef::Item(_, i) => visitor.visit_item(i), + InlinedItemRef::TraitItem(_, ti) => visitor.visit_trait_item(ti), + InlinedItemRef::ImplItem(_, ii) => visitor.visit_impl_item(ii) } - }).unwrap() - } - - fn emit_substs<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, - substs: &subst::Substs<'tcx>) { - self.emit_opaque(|this| Ok(tyencode::enc_substs(&mut this.cursor, - &ecx.ty_str_ctxt(), - substs))); - } - - fn emit_auto_adjustment<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, - adj: &adjustment::AutoAdjustment<'tcx>) { - use rustc_serialize::Encoder; + visitor.count + }; - self.emit_enum("AutoAdjustment", |this| { - match *adj { - adjustment::AdjustReifyFnPointer => { - this.emit_enum_variant("AdjustReifyFnPointer", 1, 0, |_| Ok(())) - } - - adjustment::AdjustUnsafeFnPointer => { - this.emit_enum_variant("AdjustUnsafeFnPointer", 2, 0, |_| { - Ok(()) - }) - } - - adjustment::AdjustMutToConstPointer => { - this.emit_enum_variant("AdjustMutToConstPointer", 3, 0, |_| { - Ok(()) - }) - } - - adjustment::AdjustDerefRef(ref auto_deref_ref) => { - this.emit_enum_variant("AdjustDerefRef", 4, 2, |this| { - this.emit_enum_variant_arg(0, - |this| Ok(this.emit_auto_deref_ref(ecx, auto_deref_ref))) - }) - } - - adjustment::AdjustNeverToAny(ref ty) => { - this.emit_enum_variant("AdjustNeverToAny", 5, 1, |this| { - this.emit_enum_variant_arg(0, |this| Ok(this.emit_ty(ecx, ty))) - }) - } - } - }); - } - - fn emit_autoref<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, - autoref: &adjustment::AutoRef<'tcx>) { - use rustc_serialize::Encoder; - - self.emit_enum("AutoRef", |this| { - match autoref { - &adjustment::AutoPtr(r, m) => { - this.emit_enum_variant("AutoPtr", 0, 2, |this| { - this.emit_enum_variant_arg(0, - |this| Ok(this.emit_region(ecx, *r))); - this.emit_enum_variant_arg(1, |this| m.encode(this)) - }) - } - &adjustment::AutoUnsafe(m) => { - this.emit_enum_variant("AutoUnsafe", 1, 1, |this| { - this.emit_enum_variant_arg(0, |this| m.encode(this)) - }) - } - } - }); - } - - fn emit_auto_deref_ref<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, - auto_deref_ref: &adjustment::AutoDerefRef<'tcx>) { - use rustc_serialize::Encoder; - - self.emit_struct("AutoDerefRef", 2, |this| { - this.emit_struct_field("autoderefs", 0, |this| auto_deref_ref.autoderefs.encode(this)); - - this.emit_struct_field("autoref", 1, |this| { - this.emit_option(|this| { - match auto_deref_ref.autoref { - None => this.emit_option_none(), - Some(ref a) => this.emit_option_some(|this| Ok(this.emit_autoref(ecx, a))), - } - }) - }); - - this.emit_struct_field("unsize", 2, |this| { - this.emit_option(|this| { - match auto_deref_ref.unsize { - None => this.emit_option_none(), - Some(target) => this.emit_option_some(|this| { - Ok(this.emit_ty(ecx, target)) - }) - } - }) - }) - }); - } -} - -trait write_tag_and_id { - fn tag(&mut self, tag_id: 
c::astencode_tag, f: F) where F: FnOnce(&mut Self); - fn id(&mut self, id: ast::NodeId); -} - -impl<'a> write_tag_and_id for Encoder<'a> { - fn tag<F>(&mut self, - tag_id: c::astencode_tag, - f: F) where - F: FnOnce(&mut Encoder<'a>), - { - self.start_tag(tag_id as usize); - f(self); - self.end_tag(); - } - - fn id(&mut self, id: ast::NodeId) { - id.encode(self).unwrap(); + self.lazy(&Ast { + id_range: id_visitor.result(), + item: Lazy::with_position(ii_pos), + side_tables: LazySeq::with_position_and_length(tables_pos, tables_count) + }) } } -struct SideTableEncodingIdVisitor<'a, 'b:'a, 'c:'a, 'tcx:'c> { - ecx: &'a e::EncodeContext<'c, 'tcx>, - rbml_w: &'a mut Encoder<'b>, +struct SideTableEncodingIdVisitor<'a, 'b:'a, 'tcx:'b> { + ecx: &'a mut EncodeContext<'b, 'tcx>, + count: usize } -impl<'a, 'b, 'c, 'tcx, 'v> Visitor<'v> for - SideTableEncodingIdVisitor<'a, 'b, 'c, 'tcx> { +impl<'a, 'b, 'tcx, 'v> Visitor<'v> for SideTableEncodingIdVisitor<'a, 'b, 'tcx> { fn visit_id(&mut self, id: ast::NodeId) { - encode_side_tables_for_id(self.ecx, self.rbml_w, id) - } -} + debug!("Encoding side tables for id {}", id); -fn encode_side_tables_for_ii(ecx: &e::EncodeContext, - rbml_w: &mut Encoder, - ii: &InlinedItem) { - rbml_w.start_tag(c::tag_table as usize); - ii.visit(&mut SideTableEncodingIdVisitor { - ecx: ecx, - rbml_w: rbml_w - }); - rbml_w.end_tag(); -} - -fn encode_side_tables_for_id(ecx: &e::EncodeContext, - rbml_w: &mut Encoder, - id: ast::NodeId) { - let tcx = ecx.tcx; - - debug!("Encoding side tables for id {}", id); - - if let Some(def) = tcx.expect_def_or_none(id) { - rbml_w.tag(c::tag_table_def, |rbml_w| { - rbml_w.id(id); - def.encode(rbml_w).unwrap(); - }) - } - - if let Some(ty) = tcx.node_types().get(&id) { - rbml_w.tag(c::tag_table_node_type, |rbml_w| { - rbml_w.id(id); - rbml_w.emit_ty(ecx, *ty); - }) - } - - if let Some(item_substs) = tcx.tables.borrow().item_substs.get(&id) { - rbml_w.tag(c::tag_table_item_subst, |rbml_w| { - rbml_w.id(id); - rbml_w.emit_substs(ecx, &item_substs.substs); - }) - } - - if let Some(fv) = tcx.freevars.borrow().get(&id) { - rbml_w.tag(c::tag_table_freevars, |rbml_w| { - rbml_w.id(id); - rbml_w.emit_from_vec(fv, |rbml_w, fv_entry| { - Ok(encode_freevar_entry(rbml_w, fv_entry)) - }); - }); - - for freevar in fv { - rbml_w.tag(c::tag_table_upvar_capture_map, |rbml_w| { - rbml_w.id(id); - - let var_id = freevar.def.var_id(); - let upvar_id = ty::UpvarId { - var_id: var_id, - closure_expr_id: id - }; - let upvar_capture = tcx.tables - .borrow() - .upvar_capture_map - .get(&upvar_id) - .unwrap() - .clone(); - var_id.encode(rbml_w); - rbml_w.emit_upvar_capture(ecx, &upvar_capture); - }) - } - } - - let method_call = ty::MethodCall::expr(id); - if let Some(method) = tcx.tables.borrow().method_map.get(&method_call) { - rbml_w.tag(c::tag_table_method_map, |rbml_w| { - rbml_w.id(id); - encode_method_callee(ecx, rbml_w, method_call.autoderef, method) - }) - } - - if let Some(adjustment) = tcx.tables.borrow().adjustments.get(&id) { - match *adjustment { - adjustment::AdjustDerefRef(ref adj) => { - for autoderef in 0..adj.autoderefs { - let method_call = ty::MethodCall::autoderef(id, autoderef as u32); - if let Some(method) = tcx.tables.borrow().method_map.get(&method_call) { - rbml_w.tag(c::tag_table_method_map, |rbml_w| { - rbml_w.id(id); - encode_method_callee(ecx, rbml_w, - method_call.autoderef, method) - }) - } - } + let tcx = self.ecx.tcx; + let mut encode = |entry: Option<TableEntry>| { + if let Some(entry) = entry { + (id, entry).encode(self.ecx).unwrap(); + self.count
+= 1; } - _ => {} - } + }; - rbml_w.tag(c::tag_table_adjustments, |rbml_w| { - rbml_w.id(id); - rbml_w.emit_auto_adjustment(ecx, adjustment); - }) - } - - if let Some(cast_kind) = tcx.cast_kinds.borrow().get(&id) { - rbml_w.tag(c::tag_table_cast_kinds, |rbml_w| { - rbml_w.id(id); - encode_cast_kind(rbml_w, *cast_kind) - }) - } - - if let Some(qualif) = tcx.const_qualif_map.borrow().get(&id) { - rbml_w.tag(c::tag_table_const_qualif, |rbml_w| { - rbml_w.id(id); - qualif.encode(rbml_w).unwrap() - }) - } -} - -trait doc_decoder_helpers: Sized { - fn as_int(&self) -> isize; - fn opt_child(&self, tag: c::astencode_tag) -> Option; -} - -impl<'a> doc_decoder_helpers for rbml::Doc<'a> { - fn as_int(&self) -> isize { reader::doc_as_u64(*self) as isize } - fn opt_child(&self, tag: c::astencode_tag) -> Option> { - reader::maybe_get_doc(*self, tag as usize) + encode(tcx.expect_def_or_none(id).map(TableEntry::Def)); + encode(tcx.node_types().get(&id).cloned().map(TableEntry::NodeType)); + encode(tcx.tables.borrow().item_substs.get(&id).cloned().map(TableEntry::ItemSubsts)); + encode(tcx.tables.borrow().adjustments.get(&id).cloned().map(TableEntry::Adjustment)); + encode(tcx.const_qualif_map.borrow().get(&id).cloned().map(TableEntry::ConstQualif)); } } -trait rbml_decoder_decoder_helpers<'tcx> { - fn read_ty_encoded<'a, 'b, F, R>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>, - f: F) -> R - where F: for<'x> FnOnce(&mut tydecode::TyDecoder<'x, 'tcx>) -> R; - - fn read_region(&mut self, dcx: &DecodeContext) -> ty::Region; - fn read_ty<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) -> Ty<'tcx>; - fn read_tys<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) -> Vec>; - fn read_trait_ref<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> ty::TraitRef<'tcx>; - fn read_poly_trait_ref<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> ty::PolyTraitRef<'tcx>; - fn read_predicate<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> ty::Predicate<'tcx>; - fn read_existential_bounds<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> ty::ExistentialBounds<'tcx>; - fn read_substs<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> subst::Substs<'tcx>; - fn read_upvar_capture(&mut self, dcx: &DecodeContext) - -> ty::UpvarCapture; - fn read_auto_adjustment<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> adjustment::AutoAdjustment<'tcx>; - fn read_cast_kind<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> cast::CastKind; - fn read_auto_deref_ref<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> adjustment::AutoDerefRef<'tcx>; - fn read_autoref<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> adjustment::AutoRef<'tcx>; - - // Versions of the type reading functions that don't need the full - // DecodeContext. 
- fn read_ty_nodcx<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - cdata: &cstore::CrateMetadata) -> Ty<'tcx>; - fn read_tys_nodcx<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - cdata: &cstore::CrateMetadata) -> Vec>; - fn read_substs_nodcx<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - cdata: &cstore::CrateMetadata) - -> subst::Substs<'tcx>; -} - -impl<'a, 'tcx> rbml_decoder_decoder_helpers<'tcx> for reader::Decoder<'a> { - fn read_ty_nodcx<'b>(&mut self, tcx: TyCtxt<'b, 'tcx, 'tcx>, - cdata: &cstore::CrateMetadata) - -> Ty<'tcx> { - self.read_opaque(|_, doc| { - Ok( - tydecode::TyDecoder::with_doc(tcx, cdata.cnum, doc, - &mut |id| decoder::translate_def_id(cdata, id)) - .parse_ty()) - }).unwrap() - } - - fn read_tys_nodcx<'b>(&mut self, tcx: TyCtxt<'b, 'tcx, 'tcx>, - cdata: &cstore::CrateMetadata) -> Vec> { - self.read_to_vec(|this| Ok(this.read_ty_nodcx(tcx, cdata)) ) - .unwrap() - .into_iter() - .collect() - } - - fn read_substs_nodcx<'b>(&mut self, tcx: TyCtxt<'b, 'tcx, 'tcx>, - cdata: &cstore::CrateMetadata) - -> subst::Substs<'tcx> - { - self.read_opaque(|_, doc| { - Ok( - tydecode::TyDecoder::with_doc(tcx, cdata.cnum, doc, - &mut |id| decoder::translate_def_id(cdata, id)) - .parse_substs()) - }).unwrap() - } - - fn read_ty_encoded<'b, 'c, F, R>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>, op: F) -> R - where F: for<'x> FnOnce(&mut tydecode::TyDecoder<'x,'tcx>) -> R - { - return self.read_opaque(|_, doc| { - debug!("read_ty_encoded({})", type_string(doc)); - Ok(op( - &mut tydecode::TyDecoder::with_doc( - dcx.tcx, dcx.cdata.cnum, doc, - &mut |d| convert_def_id(dcx, d)))) - }).unwrap(); - - fn type_string(doc: rbml::Doc) -> String { - let mut str = String::new(); - for i in doc.start..doc.end { - str.push(doc.data[i] as char); - } - str - } - } - fn read_region(&mut self, dcx: &DecodeContext) -> ty::Region { - // Note: regions types embed local node ids. In principle, we - // should translate these node ids into the new decode - // context. However, we do not bother, because region types - // are not used during trans. This also applies to read_ty. 
- return self.read_ty_encoded(dcx, |decoder| decoder.parse_region()); - } - fn read_ty<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) -> Ty<'tcx> { - return self.read_ty_encoded(dcx, |decoder| decoder.parse_ty()); - } - - fn read_tys<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> Vec> { - self.read_to_vec(|this| Ok(this.read_ty(dcx))).unwrap().into_iter().collect() - } - - fn read_trait_ref<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> ty::TraitRef<'tcx> { - self.read_ty_encoded(dcx, |decoder| decoder.parse_trait_ref()) - } - - fn read_poly_trait_ref<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> ty::PolyTraitRef<'tcx> { - ty::Binder(self.read_ty_encoded(dcx, |decoder| decoder.parse_trait_ref())) - } - - fn read_predicate<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> ty::Predicate<'tcx> - { - self.read_ty_encoded(dcx, |decoder| decoder.parse_predicate()) - } - - fn read_existential_bounds<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> ty::ExistentialBounds<'tcx> - { - self.read_ty_encoded(dcx, |decoder| decoder.parse_existential_bounds()) - } - - fn read_substs<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> subst::Substs<'tcx> { - self.read_opaque(|_, doc| { - Ok(tydecode::TyDecoder::with_doc(dcx.tcx, dcx.cdata.cnum, doc, - &mut |d| convert_def_id(dcx, d)) - .parse_substs()) - }).unwrap() - } - fn read_upvar_capture(&mut self, dcx: &DecodeContext) -> ty::UpvarCapture { - self.read_enum("UpvarCapture", |this| { - let variants = ["ByValue", "ByRef"]; - this.read_enum_variant(&variants, |this, i| { - Ok(match i { - 1 => ty::UpvarCapture::ByValue, - 2 => ty::UpvarCapture::ByRef(ty::UpvarBorrow { - kind: this.read_enum_variant_arg(0, - |this| Decodable::decode(this)).unwrap(), - region: this.read_enum_variant_arg(1, - |this| Ok(this.read_region(dcx))).unwrap() - }), - _ => bug!("bad enum variant for ty::UpvarCapture") - }) - }) - }).unwrap() - } - fn read_auto_adjustment<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> adjustment::AutoAdjustment<'tcx> { - self.read_enum("AutoAdjustment", |this| { - let variants = ["AdjustReifyFnPointer", "AdjustUnsafeFnPointer", - "AdjustMutToConstPointer", "AdjustDerefRef", - "AdjustNeverToAny"]; - this.read_enum_variant(&variants, |this, i| { - Ok(match i { - 1 => adjustment::AdjustReifyFnPointer, - 2 => adjustment::AdjustUnsafeFnPointer, - 3 => adjustment::AdjustMutToConstPointer, - 4 => { - let auto_deref_ref: adjustment::AutoDerefRef = - this.read_enum_variant_arg(0, - |this| Ok(this.read_auto_deref_ref(dcx))).unwrap(); - - adjustment::AdjustDerefRef(auto_deref_ref) - } - 5 => { - let ty: Ty<'tcx> = this.read_enum_variant_arg(0, |this| { - Ok(this.read_ty(dcx)) - }).unwrap(); - - adjustment::AdjustNeverToAny(ty) - } - _ => bug!("bad enum variant for adjustment::AutoAdjustment") - }) - }) - }).unwrap() - } - - fn read_auto_deref_ref<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> adjustment::AutoDerefRef<'tcx> { - self.read_struct("AutoDerefRef", 2, |this| { - Ok(adjustment::AutoDerefRef { - autoderefs: this.read_struct_field("autoderefs", 0, |this| { - Decodable::decode(this) - }).unwrap(), - autoref: this.read_struct_field("autoref", 1, |this| { - this.read_option(|this, b| { - if b { - Ok(Some(this.read_autoref(dcx))) - } else { - Ok(None) - } - }) - }).unwrap(), - unsize: this.read_struct_field("unsize", 2, |this| { - this.read_option(|this, b| { - if b { - Ok(Some(this.read_ty(dcx))) - } else { - Ok(None) - } - }) - }).unwrap(), - }) - }).unwrap() - } 
- - fn read_autoref<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> adjustment::AutoRef<'tcx> { - self.read_enum("AutoRef", |this| { - let variants = ["AutoPtr", "AutoUnsafe"]; - this.read_enum_variant(&variants, |this, i| { - Ok(match i { - 0 => { - let r: ty::Region = - this.read_enum_variant_arg(0, |this| { - Ok(this.read_region(dcx)) - }).unwrap(); - let m: hir::Mutability = - this.read_enum_variant_arg(1, |this| { - Decodable::decode(this) - }).unwrap(); - - adjustment::AutoPtr(dcx.tcx.mk_region(r), m) - } - 1 => { - let m: hir::Mutability = - this.read_enum_variant_arg(0, |this| Decodable::decode(this)).unwrap(); - - adjustment::AutoUnsafe(m) - } - _ => bug!("bad enum variant for adjustment::AutoRef") - }) - }) - }).unwrap() - } - - fn read_cast_kind<'b, 'c>(&mut self, _dcx: &DecodeContext<'b, 'c, 'tcx>) - -> cast::CastKind - { - Decodable::decode(self).unwrap() - } -} - -// Converts a def-id that appears in a type. The correct -// translation will depend on what kind of def-id this is. -// This is a subtle point: type definitions are not -// inlined into the current crate, so if the def-id names -// a nominal type or type alias, then it should be -// translated to refer to the source crate. -// -// However, *type parameters* are cloned along with the function -// they are attached to. So we should translate those def-ids -// to refer to the new, cloned copy of the type parameter. -// We only see references to free type parameters in the body of -// an inlined function. In such cases, we need the def-id to -// be a local id so that the TypeContents code is able to lookup -// the relevant info in the ty_param_defs table. -// -// *Region parameters*, unfortunately, are another kettle of fish. -// In such cases, def_id's can appear in types to distinguish -// shadowed bound regions and so forth. It doesn't actually -// matter so much what we do to these, since regions are erased -// at trans time, but it's good to keep them consistent just in -// case. We translate them with `tr_def_id()` which will map -// the crate numbers back to the original source crate. -// -// Scopes will end up as being totally bogus. This can actually -// be fixed though. -// -// Unboxed closures are cloned along with the function being -// inlined, and all side tables use interned node IDs, so we -// translate their def IDs accordingly. -// -// It'd be really nice to refactor the type repr to not include -// def-ids so that all these distinctions were unnecessary. 
-fn convert_def_id(dcx: &DecodeContext, - did: DefId) - -> DefId { - let r = dcx.tr_def_id(did); - debug!("convert_def_id(did={:?})={:?}", did, r); - return r; -} - -fn decode_side_tables(dcx: &DecodeContext, - ast_doc: rbml::Doc) { - let tbl_doc = ast_doc.get(c::tag_table as usize); - for (tag, entry_doc) in reader::docs(tbl_doc) { - let mut entry_dsr = reader::Decoder::new(entry_doc); - let id0: ast::NodeId = Decodable::decode(&mut entry_dsr).unwrap(); - let id = dcx.tr_id(id0); - - debug!(">> Side table document with tag 0x{:x} \ - found for id {} (orig {})", - tag, id, id0); - let tag = tag as u32; - let decoded_tag: Option = c::astencode_tag::from_u32(tag); - match decoded_tag { - None => { - bug!("unknown tag found in side tables: {:x}", tag); - } - Some(value) => { - let val_dsr = &mut entry_dsr; - - match value { - c::tag_table_def => { - let def = decode_def(dcx, val_dsr); - dcx.tcx.def_map.borrow_mut().insert(id, def::PathResolution::new(def)); - } - c::tag_table_node_type => { - let ty = val_dsr.read_ty(dcx); - debug!("inserting ty for node {}: {:?}", - id, ty); - dcx.tcx.node_type_insert(id, ty); - } - c::tag_table_item_subst => { - let item_substs = ty::ItemSubsts { - substs: dcx.tcx.mk_substs(val_dsr.read_substs(dcx)) - }; - dcx.tcx.tables.borrow_mut().item_substs.insert( - id, item_substs); - } - c::tag_table_freevars => { - let fv_info = val_dsr.read_to_vec(|val_dsr| { - Ok(val_dsr.read_freevar_entry(dcx)) - }).unwrap().into_iter().collect(); - dcx.tcx.freevars.borrow_mut().insert(id, fv_info); - } - c::tag_table_upvar_capture_map => { - let var_id: ast::NodeId = Decodable::decode(val_dsr).unwrap(); - let upvar_id = ty::UpvarId { - var_id: dcx.tr_id(var_id), - closure_expr_id: id - }; - let ub = val_dsr.read_upvar_capture(dcx); - dcx.tcx.tables.borrow_mut().upvar_capture_map.insert(upvar_id, ub); - } - c::tag_table_method_map => { - let (autoderef, method) = val_dsr.read_method_callee(dcx); - let method_call = ty::MethodCall { - expr_id: id, - autoderef: autoderef - }; - dcx.tcx.tables.borrow_mut().method_map.insert(method_call, method); - } - c::tag_table_adjustments => { - let adj = - val_dsr.read_auto_adjustment(dcx); - dcx.tcx.tables.borrow_mut().adjustments.insert(id, adj); - } - c::tag_table_cast_kinds => { - let cast_kind = - val_dsr.read_cast_kind(dcx); - dcx.tcx.cast_kinds.borrow_mut().insert(id, cast_kind); - } - c::tag_table_const_qualif => { - let qualif: ConstQualif = Decodable::decode(val_dsr).unwrap(); - dcx.tcx.const_qualif_map.borrow_mut().insert(id, qualif); - } - _ => { - bug!("unknown tag found in side tables: {:x}", tag); - } - } - } - } +/// Decodes an item from its AST in the cdata's metadata and adds it to the +/// ast-map. 
+pub fn decode_inlined_item<'a, 'tcx>(cdata: &CrateMetadata, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + parent_def_path: ast_map::DefPath, + parent_did: DefId, + ast: Ast<'tcx>, + orig_did: DefId) + -> &'tcx InlinedItem { + debug!("> Decoding inlined fn: {:?}", tcx.item_path_str(orig_did)); - debug!(">< Side table doc loaded"); - } -} + let cnt = ast.id_range.max.as_usize() - ast.id_range.min.as_usize(); + let start = tcx.sess.reserve_node_ids(cnt); + let id_ranges = [ast.id_range, IdRange { + min: start, + max: ast::NodeId::new(start.as_usize() + cnt) + }]; -// copy the tcache entries from the original item to the new -// inlined item -fn copy_item_types(dcx: &DecodeContext, ii: &InlinedItem, orig_did: DefId) { - fn copy_item_type(dcx: &DecodeContext, - inlined_id: ast::NodeId, - remote_did: DefId) { - let inlined_did = dcx.tcx.map.local_def_id(inlined_id); - dcx.tcx.register_item_type(inlined_did, - dcx.tcx.lookup_item_type(remote_did)); + let ii = ast.item.decode((cdata, tcx, id_ranges)); + let ii = ast_map::map_decoded_item(&tcx.map, + parent_def_path, + parent_did, + ii, + tcx.sess.next_node_id()); - } - // copy the entry for the item itself let item_node_id = match ii { &InlinedItem::Item(_, ref i) => i.id, &InlinedItem::TraitItem(_, ref ti) => ti.id, - &InlinedItem::ImplItem(_, ref ii) => ii.id, - &InlinedItem::Foreign(_, ref fi) => fi.id + &InlinedItem::ImplItem(_, ref ii) => ii.id }; - copy_item_type(dcx, item_node_id, orig_did); + let inlined_did = tcx.map.local_def_id(item_node_id); + tcx.register_item_type(inlined_did, tcx.lookup_item_type(orig_did)); - // copy the entries of inner items - if let &InlinedItem::Item(_, ref item) = ii { - match item.node { - hir::ItemEnum(ref def, _) => { - let orig_def = dcx.tcx.lookup_adt_def(orig_did); - for (i_variant, orig_variant) in - def.variants.iter().zip(orig_def.variants.iter()) - { - debug!("astencode: copying variant {:?} => {:?}", - orig_variant.did, i_variant.node.data.id()); - copy_item_type(dcx, i_variant.node.data.id(), orig_variant.did); - } + for (id, entry) in ast.side_tables.decode((cdata, tcx, id_ranges)) { + match entry { + TableEntry::Def(def) => { + tcx.def_map.borrow_mut().insert(id, def::PathResolution::new(def)); } - hir::ItemStruct(ref def, _) => { - if !def.is_struct() { - let ctor_did = dcx.tcx.lookup_adt_def(orig_did) - .struct_variant().did; - debug!("astencode: copying ctor {:?} => {:?}", ctor_did, - def.id()); - copy_item_type(dcx, def.id(), ctor_did); - } + TableEntry::NodeType(ty) => { + tcx.node_type_insert(id, ty); } - _ => {} - } - } -} - -fn inlined_item_id_range(ii: &InlinedItem) -> IdRange { - let mut visitor = IdRangeComputingVisitor::new(); - ii.visit(&mut visitor); - visitor.result() -} - -// ______________________________________________________________________ -// Testing of astencode_gen - -#[cfg(test)] -fn encode_item_ast(rbml_w: &mut Encoder, item: &hir::Item) { - rbml_w.start_tag(c::tag_tree as usize); - (*item).encode(rbml_w); - rbml_w.end_tag(); -} - -#[cfg(test)] -fn decode_item_ast(item_doc: rbml::Doc) -> hir::Item { - let chi_doc = item_doc.get(c::tag_tree as usize); - let mut d = reader::Decoder::new(chi_doc); - Decodable::decode(&mut d).unwrap() -} - -#[cfg(test)] -trait FakeExtCtxt { - fn call_site(&self) -> syntax_pos::Span; - fn cfg(&self) -> ast::CrateConfig; - fn ident_of(&self, st: &str) -> ast::Ident; - fn name_of(&self, st: &str) -> ast::Name; - fn parse_sess(&self) -> &parse::ParseSess; -} - -#[cfg(test)] -impl FakeExtCtxt for parse::ParseSess { - fn call_site(&self) -> syntax_pos::Span 
{ - syntax_pos::Span { - lo: syntax_pos::BytePos(0), - hi: syntax_pos::BytePos(0), - expn_id: syntax_pos::NO_EXPANSION, - } - } - fn cfg(&self) -> ast::CrateConfig { Vec::new() } - fn ident_of(&self, st: &str) -> ast::Ident { - parse::token::str_to_ident(st) - } - fn name_of(&self, st: &str) -> ast::Name { - parse::token::intern(st) - } - fn parse_sess(&self) -> &parse::ParseSess { self } -} - -#[cfg(test)] -fn mk_ctxt() -> parse::ParseSess { - parse::ParseSess::new() -} - -#[cfg(test)] -fn with_testing_context<T, F: FnOnce(&mut LoweringContext) -> T>(f: F) -> T { - let mut resolver = DummyResolver; - let mut lcx = LoweringContext::testing_context(&mut resolver); - f(&mut lcx) -} - -#[cfg(test)] -fn roundtrip(in_item: hir::Item) { - let mut wr = Cursor::new(Vec::new()); - encode_item_ast(&mut Encoder::new(&mut wr), &in_item); - let rbml_doc = rbml::Doc::new(wr.get_ref()); - let out_item = decode_item_ast(rbml_doc); - - assert!(in_item == out_item); -} - -#[test] -fn test_basic() { - let cx = mk_ctxt(); - with_testing_context(|lcx| { - roundtrip(lcx.lower_item(&quote_item!(&cx, - fn foo() {} - ).unwrap())); - }); -} - -#[test] -fn test_smalltalk() { - let cx = mk_ctxt(); - with_testing_context(|lcx| { - roundtrip(lcx.lower_item(&quote_item!(&cx, - fn foo() -> isize { 3 + 4 } // first smalltalk program ever executed. - ).unwrap())); - }); -} - -#[test] -fn test_more() { - let cx = mk_ctxt(); - with_testing_context(|lcx| { - roundtrip(lcx.lower_item(&quote_item!(&cx, - fn foo(x: usize, y: usize) -> usize { - let z = x + y; - return z; + TableEntry::ItemSubsts(item_substs) => { + tcx.tables.borrow_mut().item_substs.insert(id, item_substs); } - ).unwrap())); - }); -} - -#[test] -fn test_simplification() { - use middle::cstore::LOCAL_CRATE; - use rustc::hir::def_id::CRATE_DEF_INDEX; - - let cx = mk_ctxt(); - let item = quote_item!(&cx, - fn new_int_alist<B>() -> alist<B> { - fn eq_int(a: isize, b: isize) -> bool { a == b } - return alist {eq_fn: eq_int, data: Vec::new()}; - } - ).unwrap(); - let cx = mk_ctxt(); - with_testing_context(|lcx| { - let hir_item = lcx.lower_item(&item); - let def_id = DefId { krate: LOCAL_CRATE, index: CRATE_DEF_INDEX }; // dummy - let item_in = InlinedItemRef::Item(def_id, &hir_item); - let (item_out, _) = simplify_ast(item_in); - let item_exp = InlinedItem::Item(def_id, P(lcx.lower_item(&quote_item!(&cx, - fn new_int_alist<B>() -> alist<B> { - return alist {eq_fn: eq_int, data: Vec::new()}; + TableEntry::Adjustment(adj) => { + tcx.tables.borrow_mut().adjustments.insert(id, adj); } - ).unwrap()))); - match (item_out, item_exp) { - (InlinedItem::Item(_, item_out), InlinedItem::Item(_, item_exp)) => { - assert!(pprust::item_to_string(&item_out) == - pprust::item_to_string(&item_exp)); + TableEntry::ConstQualif(qualif) => { + tcx.const_qualif_map.borrow_mut().insert(id, qualif); } - _ => bug!() } - }); + } + + ii } diff --git a/src/librustc_metadata/common.rs b/src/librustc_metadata/common.rs deleted file mode 100644 index ff072cce5d..0000000000 --- a/src/librustc_metadata/common.rs +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms.
- -#![allow(non_camel_case_types, non_upper_case_globals)] - -pub use self::astencode_tag::*; - -// RBML enum definitions and utils shared by the encoder and decoder -// -// 0x00..0x1f: reserved for RBML generic type tags -// 0x20..0xef: free for use, preferred for frequent tags -// 0xf0..0xff: internally used by RBML to encode 0x100..0xfff in two bytes -// 0x100..0xfff: free for use, preferred for infrequent tags - -pub const tag_items: usize = 0x100; // top-level only - -pub const tag_paths_data_name: usize = 0x20; - -pub const tag_def_id: usize = 0x21; - -pub const tag_items_data: usize = 0x22; - -pub const tag_items_data_item: usize = 0x23; - -pub const tag_items_data_item_family: usize = 0x24; - -pub const tag_items_data_item_type: usize = 0x25; - -// GAP 0x26 - -pub const tag_items_data_item_variant: usize = 0x27; - -pub const tag_items_data_parent_item: usize = 0x28; - -pub const tag_items_data_item_is_tuple_struct_ctor: usize = 0x29; - -pub const tag_items_closure_kind: usize = 0x2a; -pub const tag_items_closure_ty: usize = 0x2b; -pub const tag_def_key: usize = 0x2c; - -// GAP 0x2d 0x2e - -pub const tag_index: usize = 0x110; // top-level only -pub const tag_xref_index: usize = 0x111; // top-level only -pub const tag_xref_data: usize = 0x112; // top-level only - -pub const tag_meta_item_name_value: usize = 0x2f; - -pub const tag_meta_item_name: usize = 0x30; - -pub const tag_meta_item_value: usize = 0x31; - -pub const tag_attributes: usize = 0x101; // top-level only - -pub const tag_attribute: usize = 0x32; - -pub const tag_meta_item_word: usize = 0x33; - -pub const tag_meta_item_list: usize = 0x34; - -// The list of crates that this crate depends on -pub const tag_crate_deps: usize = 0x102; // top-level only - -// A single crate dependency -pub const tag_crate_dep: usize = 0x35; - -pub const tag_crate_hash: usize = 0x103; // top-level only -pub const tag_crate_crate_name: usize = 0x104; // top-level only -pub const tag_crate_disambiguator: usize = 0x113; // top-level only - -pub const tag_crate_dep_crate_name: usize = 0x36; -pub const tag_crate_dep_hash: usize = 0x37; -pub const tag_crate_dep_explicitly_linked: usize = 0x38; // top-level only - -pub const tag_item_trait_item: usize = 0x3a; - -pub const tag_item_trait_ref: usize = 0x3b; - -// discriminator value for variants -pub const tag_disr_val: usize = 0x3c; - -// GAP 0x3d, 0x3e, 0x3f, 0x40 - -pub const tag_item_field: usize = 0x41; -// GAP 0x42 -pub const tag_item_variances: usize = 0x43; -/* - trait items contain tag_item_trait_item elements, - impl items contain tag_item_impl_item elements, and classes - have both. That's because some code treats classes like traits, - and other code treats them like impls. Because classes can contain - both, tag_item_trait_item and tag_item_impl_item have to be two - different tags. - */ -pub const tag_item_impl_item: usize = 0x44; -pub const tag_item_trait_method_explicit_self: usize = 0x45; - - -// Reexports are found within module tags. Each reexport contains def_ids -// and names. -pub const tag_items_data_item_reexport: usize = 0x46; -pub const tag_items_data_item_reexport_def_id: usize = 0x47; -pub const tag_items_data_item_reexport_name: usize = 0x48; - -// used to encode crate_ctxt side tables -enum_from_u32! 
{ - #[derive(Copy, Clone, PartialEq)] - #[repr(usize)] - pub enum astencode_tag { // Reserves 0x50 -- 0x6f - tag_ast = 0x50, - - tag_tree = 0x51, - - tag_mir = 0x52, - - tag_table = 0x53, - // GAP 0x54, 0x55 - tag_table_def = 0x56, - tag_table_node_type = 0x57, - tag_table_item_subst = 0x58, - tag_table_freevars = 0x59, - // GAP 0x5a, 0x5b, 0x5c, 0x5d, 0x5e - tag_table_method_map = 0x5f, - // GAP 0x60 - tag_table_adjustments = 0x61, - // GAP 0x62, 0x63, 0x64, 0x65 - tag_table_upvar_capture_map = 0x66, - // GAP 0x67, 0x68 - tag_table_const_qualif = 0x69, - tag_table_cast_kinds = 0x6a, - } -} - -pub const tag_item_trait_item_sort: usize = 0x70; - -pub const tag_crate_triple: usize = 0x105; // top-level only - -pub const tag_dylib_dependency_formats: usize = 0x106; // top-level only - -// Language items are a top-level directory (for speed). Hierarchy: -// -// tag_lang_items -// - tag_lang_items_item -// - tag_lang_items_item_id: u32 -// - tag_lang_items_item_index: u32 - -pub const tag_lang_items: usize = 0x107; // top-level only -pub const tag_lang_items_item: usize = 0x73; -pub const tag_lang_items_item_id: usize = 0x74; -pub const tag_lang_items_item_index: usize = 0x75; -pub const tag_lang_items_missing: usize = 0x76; - -pub const tag_item_unnamed_field: usize = 0x77; -pub const tag_items_data_item_visibility: usize = 0x78; -pub const tag_items_data_item_inherent_impl: usize = 0x79; -// GAP 0x7a -pub const tag_mod_child: usize = 0x7b; -pub const tag_misc_info: usize = 0x108; // top-level only -pub const tag_misc_info_crate_items: usize = 0x7c; - -pub const tag_impls: usize = 0x109; // top-level only -pub const tag_impls_trait: usize = 0x7d; -pub const tag_impls_trait_impl: usize = 0x7e; - -// GAP 0x7f, 0x80, 0x81 - -pub const tag_native_libraries: usize = 0x10a; // top-level only -pub const tag_native_libraries_lib: usize = 0x82; -pub const tag_native_libraries_name: usize = 0x83; -pub const tag_native_libraries_kind: usize = 0x84; - -pub const tag_plugin_registrar_fn: usize = 0x10b; // top-level only - -pub const tag_method_argument_names: usize = 0x85; -pub const tag_method_argument_name: usize = 0x86; - -pub const tag_reachable_ids: usize = 0x10c; // top-level only -pub const tag_reachable_id: usize = 0x87; - -pub const tag_items_data_item_stability: usize = 0x88; - -pub const tag_items_data_item_repr: usize = 0x89; - -pub const tag_struct_fields: usize = 0x10d; // top-level only -pub const tag_struct_field: usize = 0x8a; - -pub const tag_items_data_item_struct_ctor: usize = 0x8b; -pub const tag_attribute_is_sugared_doc: usize = 0x8c; -// GAP 0x8d -pub const tag_items_data_region: usize = 0x8e; - -pub const tag_region_param_def: usize = 0x8f; -pub const tag_region_param_def_ident: usize = 0x90; -pub const tag_region_param_def_def_id: usize = 0x91; -pub const tag_region_param_def_space: usize = 0x92; -pub const tag_region_param_def_index: usize = 0x93; - -pub const tag_type_param_def: usize = 0x94; - -pub const tag_item_generics: usize = 0x95; -pub const tag_method_ty_generics: usize = 0x96; - -pub const tag_type_predicate: usize = 0x97; -pub const tag_self_predicate: usize = 0x98; -pub const tag_fn_predicate: usize = 0x99; - -pub const tag_unsafety: usize = 0x9a; - -pub const tag_associated_type_names: usize = 0x9b; -pub const tag_associated_type_name: usize = 0x9c; - -pub const tag_polarity: usize = 0x9d; - -pub const tag_macro_defs: usize = 0x10e; // top-level only -pub const tag_macro_def: usize = 0x9e; -pub const tag_macro_def_body: usize = 0x9f; -pub const tag_macro_def_span_lo: 
usize = 0xa8; -pub const tag_macro_def_span_hi: usize = 0xa9; - -pub const tag_paren_sugar: usize = 0xa0; - -pub const tag_codemap: usize = 0xa1; -pub const tag_codemap_filemap: usize = 0xa2; - -pub const tag_item_super_predicates: usize = 0xa3; - -pub const tag_defaulted_trait: usize = 0xa4; - -pub const tag_impl_coerce_unsized_kind: usize = 0xa5; - -pub const tag_items_data_item_constness: usize = 0xa6; - -pub const tag_items_data_item_deprecation: usize = 0xa7; - -pub const tag_items_data_item_defaultness: usize = 0xa8; - -pub const tag_items_data_parent_impl: usize = 0xa9; - -pub const tag_rustc_version: usize = 0x10f; -pub fn rustc_version() -> String { - format!( - "rustc {}", - option_env!("CFG_VERSION").unwrap_or("unknown version") - ) -} - -pub const tag_panic_strategy: usize = 0x114; - -// NB: increment this if you change the format of metadata such that -// rustc_version can't be found. -pub const metadata_encoding_version : &'static [u8] = &[b'r', b'u', b's', b't', 0, 0, 0, 2]; diff --git a/src/librustc_metadata/creader.rs b/src/librustc_metadata/creader.rs index 4a656b180f..2e03b7868a 100644 --- a/src/librustc_metadata/creader.rs +++ b/src/librustc_metadata/creader.rs @@ -8,64 +8,53 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![allow(non_camel_case_types)] - //! Validates all used crates and extern libraries and loads their metadata use cstore::{self, CStore, CrateSource, MetadataBlob}; -use decoder; use loader::{self, CratePaths}; +use macro_import; +use schema::CrateRoot; -use rustc::hir::def_id::DefIndex; +use rustc::hir::def_id::{CrateNum, DefIndex}; use rustc::hir::svh::Svh; -use rustc::dep_graph::{DepGraph, DepNode}; +use rustc::middle::cstore::LoadedMacro; use rustc::session::{config, Session}; use rustc::session::config::PanicStrategy; use rustc::session::search_paths::PathKind; +use rustc::middle; use rustc::middle::cstore::{CrateStore, validate_crate_name, ExternCrate}; use rustc::util::nodemap::{FnvHashMap, FnvHashSet}; use rustc::hir::map as hir_map; use std::cell::{RefCell, Cell}; +use std::ops::Deref; use std::path::PathBuf; use std::rc::Rc; use std::fs; use syntax::ast; use syntax::abi::Abi; -use syntax::codemap; use syntax::parse; use syntax::attr; -use syntax::attr::AttrMetaMethods; use syntax::parse::token::InternedString; -use syntax::visit; -use syntax_pos::{self, Span, mk_sp, Pos}; +use syntax_pos::{self, Span, mk_sp}; use log; -struct LocalCrateReader<'a> { - sess: &'a Session, +pub struct CrateLoader<'a> { + pub sess: &'a Session, + pub creader: CrateReader<'a>, cstore: &'a CStore, - creader: CrateReader<'a>, - krate: &'a ast::Crate, - definitions: &'a hir_map::Definitions, } pub struct CrateReader<'a> { sess: &'a Session, cstore: &'a CStore, - next_crate_num: ast::CrateNum, + next_crate_num: CrateNum, foreign_item_map: FnvHashMap>, local_crate_name: String, local_crate_config: ast::CrateConfig, } -impl<'a> visit::Visitor for LocalCrateReader<'a> { - fn visit_item(&mut self, a: &ast::Item) { - self.process_item(a); - visit::walk_item(self, a); - } -} - fn dump_crates(cstore: &CStore) { info!("resolved crates:"); cstore.iter_crate_data_origins(|_, data, opt_source| { @@ -86,7 +75,7 @@ fn should_link(i: &ast::Item) -> bool { } #[derive(Debug)] -struct CrateInfo { +struct ExternCrateInfo { ident: String, name: String, id: ast::NodeId, @@ -101,8 +90,10 @@ fn register_native_lib(sess: &Session, if name.is_empty() { match span { Some(span) => { - span_err!(sess, span, E0454, - "#[link(name = \"\")] 
given with empty name"); + struct_span_err!(sess, span, E0454, + "#[link(name = \"\")] given with empty name") + .span_label(span, &format!("empty name given")) + .emit(); } None => { sess.err("empty library name given via `-l`"); @@ -129,27 +120,45 @@ struct ExtensionCrate { metadata: PMDSource, dylib: Option<PathBuf>, target_only: bool, + + ident: String, + name: String, + span: Span, + should_link: bool, } enum PMDSource { Registered(Rc<cstore::CrateMetadata>), - Owned(MetadataBlob), + Owned(loader::Library), } -impl PMDSource { - pub fn as_slice<'a>(&'a self) -> &'a [u8] { +impl Deref for PMDSource { + type Target = MetadataBlob; + + fn deref(&self) -> &MetadataBlob { match *self { - PMDSource::Registered(ref cmd) => cmd.data(), - PMDSource::Owned(ref mdb) => mdb.as_slice(), + PMDSource::Registered(ref cmd) => &cmd.blob, + PMDSource::Owned(ref lib) => &lib.metadata } } } enum LoadResult { - Previous(ast::CrateNum), + Previous(CrateNum), Loaded(loader::Library), } +pub struct Macros { + pub macro_rules: Vec<ast::MacroDef>, + + /// An array of pairs where the first element is the name of the custom + /// derive (e.g. the trait being derived) and the second element is the + /// index of the definition. + pub custom_derive_registrar: Option<DefIndex>, + pub svh: Svh, + pub dylib: Option<PathBuf>, +} + impl<'a> CrateReader<'a> { pub fn new(sess: &'a Session, cstore: &'a CStore, @@ -166,7 +175,7 @@ impl<'a> CrateReader<'a> { } } - fn extract_crate_info(&self, i: &ast::Item) -> Option<CrateInfo> { + fn extract_crate_info(&self, i: &ast::Item) -> Option<ExternCrateInfo> { match i.node { ast::ItemKind::ExternCrate(ref path_opt) => { debug!("resolving extern crate stmt. ident: {} path_opt: {:?}", @@ -179,7 +188,7 @@ impl<'a> CrateReader<'a> { } None => i.ident.to_string(), }; - Some(CrateInfo { + Some(ExternCrateInfo { ident: i.ident.to_string(), name: name, id: i.id, @@ -191,7 +200,7 @@ impl<'a> CrateReader<'a> { } fn existing_match(&self, name: &str, hash: Option<&Svh>, kind: PathKind) - -> Option<ast::CrateNum> { + -> Option<CrateNum> { let mut ret = None; self.cstore.iter_crate_data(|cnum, data| { if data.name != name { return } @@ -241,32 +250,28 @@ impl<'a> CrateReader<'a> { fn verify_no_symbol_conflicts(&self, span: Span, - metadata: &MetadataBlob) { - let disambiguator = decoder::get_crate_disambiguator(metadata.as_slice()); - let crate_name = decoder::get_crate_name(metadata.as_slice()); - + root: &CrateRoot) { // Check for (potential) conflicts with the local crate - if self.local_crate_name == crate_name && - self.sess.local_crate_disambiguator() == disambiguator { + if self.local_crate_name == root.name && + self.sess.local_crate_disambiguator() == &root.disambiguator[..] { span_fatal!(self.sess, span, E0519, "the current crate is indistinguishable from one of its \ dependencies: it has the same crate-name `{}` and was \ compiled with the same `-C metadata` arguments. This \ will result in symbol conflicts between the two.", - crate_name) + root.name) } - let svh = decoder::get_crate_hash(metadata.as_slice()); // Check for conflicts with any crate loaded so far self.cstore.iter_crate_data(|_, other| { - if other.name() == crate_name && // same crate-name - other.disambiguator() == disambiguator && // same crate-disambiguator - other.hash() != svh { // but different SVH + if other.name() == root.name && // same crate-name + other.disambiguator() == root.disambiguator && // same crate-disambiguator + other.hash() != root.hash { // but different SVH span_fatal!(self.sess, span, E0523, "found two different crates with name `{}` that are \ not distinguished by differing `-C metadata`.
This \ will result in symbol conflicts between the two.", - crate_name) + root.name) } }); } @@ -278,13 +283,15 @@ impl<'a> CrateReader<'a> { span: Span, lib: loader::Library, explicitly_linked: bool) - -> (ast::CrateNum, Rc, + -> (CrateNum, Rc, cstore::CrateSource) { - self.verify_no_symbol_conflicts(span, &lib.metadata); + info!("register crate `extern crate {} as {}`", name, ident); + let crate_root = lib.metadata.get_root(); + self.verify_no_symbol_conflicts(span, &crate_root); // Claim this crate number and cache it let cnum = self.next_crate_num; - self.next_crate_num += 1; + self.next_crate_num = CrateNum::from_u32(cnum.as_u32() + 1); // Stash paths for top-most crate locally if necessary. let crate_paths = if root.is_none() { @@ -301,20 +308,22 @@ impl<'a> CrateReader<'a> { let loader::Library { dylib, rlib, metadata } = lib; - let cnum_map = self.resolve_crate_deps(root, metadata.as_slice(), cnum, span); - let staged_api = self.is_staged_api(metadata.as_slice()); + let cnum_map = self.resolve_crate_deps(root, &crate_root, &metadata, cnum, span); + + if crate_root.macro_derive_registrar.is_some() { + self.sess.span_err(span, "crates of the `rustc-macro` crate type \ + cannot be linked at runtime"); + } let cmeta = Rc::new(cstore::CrateMetadata { name: name.to_string(), extern_crate: Cell::new(None), - index: decoder::load_index(metadata.as_slice()), - xref_index: decoder::load_xrefs(metadata.as_slice()), - key_map: decoder::load_key_map(metadata.as_slice()), - data: metadata, + key_map: metadata.load_key_map(crate_root.index), + root: crate_root, + blob: metadata, cnum_map: RefCell::new(cnum_map), cnum: cnum, codemap_import_info: RefCell::new(vec![]), - staged_api: staged_api, explicitly_linked: Cell::new(explicitly_linked), }); @@ -329,16 +338,6 @@ impl<'a> CrateReader<'a> { (cnum, cmeta, source) } - fn is_staged_api(&self, data: &[u8]) -> bool { - let attrs = decoder::get_crate_attributes(data); - for attr in &attrs { - if attr.name() == "stable" || attr.name() == "unstable" { - return true - } - } - false - } - fn resolve_crate(&mut self, root: &Option, ident: &str, @@ -347,10 +346,12 @@ impl<'a> CrateReader<'a> { span: Span, kind: PathKind, explicitly_linked: bool) - -> (ast::CrateNum, Rc, cstore::CrateSource) { + -> (CrateNum, Rc, cstore::CrateSource) { + info!("resolving crate `extern crate {} as {}`", name, ident); let result = match self.existing_match(name, hash, kind) { Some(cnum) => LoadResult::Previous(cnum), None => { + info!("falling back to a load"); let mut load_ctxt = loader::Context { sess: self.sess, span: span, @@ -403,14 +404,13 @@ impl<'a> CrateReader<'a> { // Note that we only do this for target triple crates, though, as we // don't want to match a host crate against an equivalent target one // already loaded. 
+ let root = library.metadata.get_root(); if loader.triple == self.sess.opts.target_triple { - let meta_hash = decoder::get_crate_hash(library.metadata.as_slice()); - let meta_name = decoder::get_crate_name(library.metadata.as_slice()) - .to_string(); let mut result = LoadResult::Loaded(library); self.cstore.iter_crate_data(|cnum, data| { - if data.name() == meta_name && meta_hash == data.hash() { + if data.name() == root.name && root.hash == data.hash() { assert!(loader.hash.is_none()); + info!("load success, going to previous cnum: {}", cnum); result = LoadResult::Previous(cnum); } }); @@ -421,9 +421,9 @@ impl<'a> CrateReader<'a> { } fn update_extern_crate(&mut self, - cnum: ast::CrateNum, + cnum: CrateNum, mut extern_crate: ExternCrate, - visited: &mut FnvHashSet<(ast::CrateNum, bool)>) + visited: &mut FnvHashSet<(CrateNum, bool)>) { if !visited.insert((cnum, extern_crate.direct)) { return } @@ -455,33 +455,39 @@ impl<'a> CrateReader<'a> { // Go through the crate metadata and load any crates that it references fn resolve_crate_deps(&mut self, root: &Option, - cdata: &[u8], - krate: ast::CrateNum, + crate_root: &CrateRoot, + metadata: &MetadataBlob, + krate: CrateNum, span: Span) -> cstore::CrateNumMap { debug!("resolving deps of external crate"); // The map from crate numbers in the crate we're resolving to local crate // numbers - let map: FnvHashMap<_, _> = decoder::get_crate_deps(cdata).iter().map(|dep| { + let deps = crate_root.crate_deps.decode(metadata); + let map: FnvHashMap<_, _> = deps.enumerate().map(|(crate_num, dep)| { debug!("resolving dep crate {} hash: `{}`", dep.name, dep.hash); - let (local_cnum, _, _) = self.resolve_crate(root, - &dep.name, - &dep.name, + let (local_cnum, ..) = self.resolve_crate(root, + &dep.name.as_str(), + &dep.name.as_str(), Some(&dep.hash), span, PathKind::Dependency, dep.explicitly_linked); - (dep.cnum, local_cnum) + (CrateNum::new(crate_num + 1), local_cnum) }).collect(); - let max_cnum = map.values().cloned().max().unwrap_or(0); + let max_cnum = map.values().cloned().max().map(|cnum| cnum.as_u32()).unwrap_or(0); // we map 0 and all other holes in the map to our parent crate. The "additional" // self-dependencies should be harmless. 
- (0..max_cnum+1).map(|cnum| map.get(&cnum).cloned().unwrap_or(krate)).collect() + (0..max_cnum+1).map(|cnum| { + map.get(&CrateNum::from_u32(cnum)).cloned().unwrap_or(krate) + }).collect() } - fn read_extension_crate(&mut self, span: Span, info: &CrateInfo) -> ExtensionCrate { + fn read_extension_crate(&mut self, span: Span, info: &ExternCrateInfo) -> ExtensionCrate { + info!("read extension crate {} `extern crate {} as {}` linked={}", + info.id, info.name, info.ident, info.should_link); let target_triple = &self.sess.opts.target_triple[..]; let is_cross = target_triple != config::host_triple(); let mut should_link = info.should_link && !is_cross; @@ -532,16 +538,7 @@ impl<'a> CrateReader<'a> { } LoadResult::Loaded(library) => { let dylib = library.dylib.clone(); - let metadata = if should_link { - // Register crate now to avoid double-reading metadata - let (_, cmd, _) = self.register_crate(&None, &info.ident, - &info.name, span, - library, true); - PMDSource::Registered(cmd) - } else { - // Not registering the crate; just hold on to the metadata - PMDSource::Owned(library.metadata) - }; + let metadata = PMDSource::Owned(library); (dylib, metadata) } }; @@ -550,66 +547,103 @@ impl<'a> CrateReader<'a> { metadata: metadata, dylib: dylib.map(|p| p.0), target_only: target_only, + name: info.name.to_string(), + ident: info.ident.to_string(), + span: span, + should_link: should_link, } } - /// Read exported macros. - pub fn read_exported_macros(&mut self, item: &ast::Item) -> Vec { + pub fn read_macros(&mut self, item: &ast::Item) -> Macros { let ci = self.extract_crate_info(item).unwrap(); let ekrate = self.read_extension_crate(item.span, &ci); + let root = ekrate.metadata.get_root(); let source_name = format!("<{} macros>", item.ident); - let mut macros = vec![]; - decoder::each_exported_macro(ekrate.metadata.as_slice(), - |name, attrs, span, body| { - // NB: Don't use parse::parse_tts_from_source_str because it parses with - // quote_depth > 0. - let mut p = parse::new_parser_from_source_str(&self.sess.parse_sess, - self.local_crate_config.clone(), - source_name.clone(), - body); - let lo = p.span.lo; - let body = match p.parse_all_token_trees() { - Ok(body) => body, - Err(mut err) => { - err.emit(); - self.sess.abort_if_errors(); - unreachable!(); - } - }; - let local_span = mk_sp(lo, p.last_span.hi); - - // Mark the attrs as used - for attr in &attrs { - attr::mark_used(attr); + let mut ret = Macros { + macro_rules: Vec::new(), + custom_derive_registrar: None, + svh: root.hash, + dylib: None, + }; + for def in root.macro_defs.decode(&*ekrate.metadata) { + // NB: Don't use parse::parse_tts_from_source_str because it parses with + // quote_depth > 0. 
+ let mut p = parse::new_parser_from_source_str(&self.sess.parse_sess, + self.local_crate_config.clone(), + source_name.clone(), + def.body); + let lo = p.span.lo; + let body = match p.parse_all_token_trees() { + Ok(body) => body, + Err(mut err) => { + err.emit(); + self.sess.abort_if_errors(); + unreachable!(); } + }; + let local_span = mk_sp(lo, p.last_span.hi); - macros.push(ast::MacroDef { - ident: ast::Ident::with_empty_ctxt(name), - attrs: attrs, - id: ast::DUMMY_NODE_ID, - span: local_span, - imported_from: Some(item.ident), - // overridden in plugin/load.rs - export: false, - use_locally: false, - allow_internal_unstable: false, - - body: body, - }); - self.sess.imported_macro_spans.borrow_mut() - .insert(local_span, (name.as_str().to_string(), span)); - true + // Mark the attrs as used + for attr in &def.attrs { + attr::mark_used(attr); } - ); - macros + + ret.macro_rules.push(ast::MacroDef { + ident: ast::Ident::with_empty_ctxt(def.name), + attrs: def.attrs, + id: ast::DUMMY_NODE_ID, + span: local_span, + imported_from: Some(item.ident), + // overridden in plugin/load.rs + export: false, + use_locally: false, + allow_internal_unstable: false, + + body: body, + }); + self.sess.imported_macro_spans.borrow_mut() + .insert(local_span, (def.name.as_str().to_string(), def.span)); + } + + match root.macro_derive_registrar { + Some(id) => ret.custom_derive_registrar = Some(id), + + // If this crate is not a rustc-macro crate then we might be able to + // register it with the local crate store to prevent loading the + // metadata twice. + // + // If it's a rustc-macro crate, though, then we definitely don't + // want to register it with the local crate store as we're just + // going to use it as we would a plugin. + None => { + ekrate.register(self); + return ret + } + } + + self.cstore.add_used_for_derive_macros(item); + ret.dylib = ekrate.dylib.clone(); + if ret.dylib.is_none() { + span_bug!(item.span, "rustc-macro crate not dylib"); + } + + if ekrate.target_only { + let message = format!("rustc-macro crate is not available for \ + triple `{}` (only found {})", + config::host_triple(), + self.sess.opts.target_triple); + self.sess.span_fatal(item.span, &message); + } + + return ret } /// Look for a plugin registrar. Returns library path, crate /// SVH and DefIndex of the registrar function. 
pub fn find_plugin_registrar(&mut self, span: Span, name: &str) -> Option<(PathBuf, Svh, DefIndex)> { - let ekrate = self.read_extension_crate(span, &CrateInfo { + let ekrate = self.read_extension_crate(span, &ExternCrateInfo { name: name.to_string(), ident: name.to_string(), id: ast::DUMMY_NODE_ID, @@ -626,13 +660,10 @@ impl<'a> CrateReader<'a> { span_fatal!(self.sess, span, E0456, "{}", &message[..]); } - let svh = decoder::get_crate_hash(ekrate.metadata.as_slice()); - let registrar = - decoder::get_plugin_registrar_fn(ekrate.metadata.as_slice()); - - match (ekrate.dylib.as_ref(), registrar) { + let root = ekrate.metadata.get_root(); + match (ekrate.dylib.as_ref(), root.plugin_registrar_fn) { (Some(dylib), Some(reg)) => { - Some((dylib.to_path_buf(), svh, reg)) + Some((dylib.to_path_buf(), root.hash, reg)) } (None, Some(_)) => { span_err!(self.sess, span, E0457, @@ -773,6 +804,7 @@ impl<'a> CrateReader<'a> { match *ct { config::CrateTypeExecutable => need_exe_alloc = true, config::CrateTypeDylib | + config::CrateTypeRustcMacro | config::CrateTypeCdylib | config::CrateTypeStaticlib => need_lib_alloc = true, config::CrateTypeRlib => {} @@ -817,7 +849,7 @@ impl<'a> CrateReader<'a> { } fn inject_dependency_if(&self, - krate: ast::CrateNum, + krate: CrateNum, what: &str, needs_dep: &Fn(&cstore::CrateMetadata) -> bool) { // don't perform this validation if the session has errors, as one of @@ -857,84 +889,43 @@ impl<'a> CrateReader<'a> { } } -impl<'a> LocalCrateReader<'a> { - fn new(sess: &'a Session, - cstore: &'a CStore, - defs: &'a hir_map::Definitions, - krate: &'a ast::Crate, - local_crate_name: &str) - -> LocalCrateReader<'a> { - LocalCrateReader { - sess: sess, - cstore: cstore, - creader: CrateReader::new(sess, cstore, local_crate_name, krate.config.clone()), - krate: krate, - definitions: defs, - } - } - - // Traverses an AST, reading all the information about use'd crates and - // extern libraries necessary for later resolving, typechecking, linking, - // etc. 
- fn read_crates(&mut self, dep_graph: &DepGraph) { - let _task = dep_graph.in_task(DepNode::CrateReader); - - self.process_crate(self.krate); - visit::walk_crate(self, self.krate); - self.creader.inject_allocator_crate(); - self.creader.inject_panic_runtime(self.krate); - - if log_enabled!(log::INFO) { - dump_crates(&self.cstore); +impl ExtensionCrate { + fn register(self, creader: &mut CrateReader) { + if !self.should_link { + return } - for &(ref name, kind) in &self.sess.opts.libs { - register_native_lib(self.sess, self.cstore, None, name.clone(), kind); - } - self.creader.register_statically_included_foreign_items(); - } + let library = match self.metadata { + PMDSource::Owned(lib) => lib, + PMDSource::Registered(_) => return, + }; - fn process_crate(&self, c: &ast::Crate) { - for a in c.attrs.iter().filter(|m| m.name() == "link_args") { - if let Some(ref linkarg) = a.value_str() { - self.cstore.add_used_link_args(&linkarg); - } - } + // Register crate now to avoid double-reading metadata + creader.register_crate(&None, + &self.ident, + &self.name, + self.span, + library, + true); } +} - fn process_item(&mut self, i: &ast::Item) { - match i.node { - ast::ItemKind::ExternCrate(_) => { - if !should_link(i) { - return; - } +impl<'a> CrateLoader<'a> { + pub fn new(sess: &'a Session, cstore: &'a CStore, krate: &ast::Crate, crate_name: &str) + -> Self { + let loader = CrateLoader { + sess: sess, + cstore: cstore, + creader: CrateReader::new(sess, cstore, crate_name, krate.config.clone()), + }; - if let Some(info) = self.creader.extract_crate_info(i) { - let (cnum, _, _) = self.creader.resolve_crate(&None, - &info.ident, - &info.name, - None, - i.span, - PathKind::Crate, - true); - - let def_id = self.definitions.opt_local_def_id(i.id).unwrap(); - let len = self.definitions.def_path(def_id.index).data.len(); - - self.creader.update_extern_crate(cnum, - ExternCrate { - def_id: def_id, - span: i.span, - direct: true, - path_len: len, - }, - &mut FnvHashSet()); - self.cstore.add_extern_mod_stmt_cnum(info.id, cnum); - } + for attr in krate.attrs.iter().filter(|m| m.name() == "link_args") { + if let Some(ref linkarg) = attr.value_str() { + loader.cstore.add_used_link_args(&linkarg); } - ast::ItemKind::ForeignMod(ref fm) => self.process_foreign_mod(i, fm), - _ => { } } + + loader } fn process_foreign_mod(&mut self, i: &ast::Item, fm: &ast::ForeignMod) { @@ -963,8 +954,9 @@ impl<'a> LocalCrateReader<'a> { Some("dylib") => cstore::NativeUnknown, Some("framework") => cstore::NativeFramework, Some(k) => { - span_err!(self.sess, m.span, E0458, - "unknown kind: `{}`", k); + struct_span_err!(self.sess, m.span, E0458, + "unknown kind: `{}`", k) + .span_label(m.span, &format!("unknown kind")).emit(); cstore::NativeUnknown } None => cstore::NativeUnknown @@ -975,8 +967,9 @@ impl<'a> LocalCrateReader<'a> { let n = match n { Some(n) => n, None => { - span_err!(self.sess, m.span, E0459, - "#[link(...)] specified without `name = \"foo\"`"); + struct_span_err!(self.sess, m.span, E0459, + "#[link(...)] specified without `name = \"foo\"`") + .span_label(m.span, &format!("missing `name` argument")).emit(); InternedString::new("foo") } }; @@ -996,143 +989,62 @@ impl<'a> LocalCrateReader<'a> { } } -/// Traverses an AST, reading all the information about use'd crates and extern -/// libraries necessary for later resolving, typechecking, linking, etc. 
-pub fn read_local_crates(sess: & Session, - cstore: & CStore, - defs: & hir_map::Definitions, - krate: & ast::Crate, - local_crate_name: &str, - dep_graph: &DepGraph) { - LocalCrateReader::new(sess, cstore, defs, krate, local_crate_name).read_crates(dep_graph) -} - -/// Imports the codemap from an external crate into the codemap of the crate -/// currently being compiled (the "local crate"). -/// -/// The import algorithm works analogous to how AST items are inlined from an -/// external crate's metadata: -/// For every FileMap in the external codemap an 'inline' copy is created in the -/// local codemap. The correspondence relation between external and local -/// FileMaps is recorded in the `ImportedFileMap` objects returned from this -/// function. When an item from an external crate is later inlined into this -/// crate, this correspondence information is used to translate the span -/// information of the inlined item so that it refers the correct positions in -/// the local codemap (see `astencode::DecodeContext::tr_span()`). -/// -/// The import algorithm in the function below will reuse FileMaps already -/// existing in the local codemap. For example, even if the FileMap of some -/// source file of libstd gets imported many times, there will only ever be -/// one FileMap object for the corresponding file in the local codemap. -/// -/// Note that imported FileMaps do not actually contain the source code of the -/// file they represent, just information about length, line breaks, and -/// multibyte characters. This information is enough to generate valid debuginfo -/// for items inlined from other crates. -pub fn import_codemap(local_codemap: &codemap::CodeMap, - metadata: &MetadataBlob) - -> Vec { - let external_codemap = decoder::get_imported_filemaps(metadata.as_slice()); - - let imported_filemaps = external_codemap.into_iter().map(|filemap_to_import| { - // Try to find an existing FileMap that can be reused for the filemap to - // be imported. A FileMap is reusable if it is exactly the same, just - // positioned at a different offset within the codemap. - let reusable_filemap = { - local_codemap.files - .borrow() - .iter() - .find(|fm| are_equal_modulo_startpos(&fm, &filemap_to_import)) - .map(|rc| rc.clone()) - }; - - match reusable_filemap { - Some(fm) => { - cstore::ImportedFileMap { - original_start_pos: filemap_to_import.start_pos, - original_end_pos: filemap_to_import.end_pos, - translated_filemap: fm - } - } - None => { - // We can't reuse an existing FileMap, so allocate a new one - // containing the information we need. - let syntax_pos::FileMap { - name, - abs_path, - start_pos, - end_pos, - lines, - multibyte_chars, - .. - } = filemap_to_import; - - let source_length = (end_pos - start_pos).to_usize(); - - // Translate line-start positions and multibyte character - // position into frame of reference local to file. - // `CodeMap::new_imported_filemap()` will then translate those - // coordinates to their new global frame of reference when the - // offset of the FileMap is known. 
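A simplified, self-contained sketch of the offset translation described in the comment just above: line-start positions are first made relative to their file, then shifted to the FileMap's new offset once it is placed in the importing crate's codemap. The FileMap struct here is a pared-down stand-in (real FileMaps also track multibyte characters), and the helper names are invented.

// Standalone sketch of re-basing imported FileMap positions.
struct FileMap {
    name: String,
    start_pos: u32,  // offset of the file within its codemap
    lines: Vec<u32>, // absolute byte position of each line start
}

// Strip the original offset so positions become file-local.
fn to_file_local(fm: &FileMap) -> Vec<u32> {
    fm.lines.iter().map(|&pos| pos - fm.start_pos).collect()
}

// Re-import at a new offset in the local codemap.
fn import_at(name: &str, local_lines: &[u32], new_start: u32) -> FileMap {
    FileMap {
        name: name.to_owned(),
        start_pos: new_start,
        lines: local_lines.iter().map(|&pos| pos + new_start).collect(),
    }
}

fn main() {
    let external = FileMap {
        name: "lib.rs".into(),
        start_pos: 4000,
        lines: vec![4000, 4017, 4052],
    };
    let local_lines = to_file_local(&external); // [0, 17, 52]
    let imported = import_at(&external.name, &local_lines, 120_000);
    assert_eq!(imported.lines, vec![120_000, 120_017, 120_052]);
    println!("imported `{}` at offset {}", imported.name, imported.start_pos);
}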
- let mut lines = lines.into_inner(); - for pos in &mut lines { - *pos = *pos - start_pos; - } - let mut multibyte_chars = multibyte_chars.into_inner(); - for mbc in &mut multibyte_chars { - mbc.pos = mbc.pos - start_pos; - } +impl<'a> middle::cstore::CrateLoader for CrateLoader<'a> { + fn postprocess(&mut self, krate: &ast::Crate) { + self.creader.inject_allocator_crate(); + self.creader.inject_panic_runtime(krate); - let local_version = local_codemap.new_imported_filemap(name, - abs_path, - source_length, - lines, - multibyte_chars); - cstore::ImportedFileMap { - original_start_pos: start_pos, - original_end_pos: end_pos, - translated_filemap: local_version - } - } + if log_enabled!(log::INFO) { + dump_crates(&self.cstore); } - }).collect(); - - return imported_filemaps; - fn are_equal_modulo_startpos(fm1: &syntax_pos::FileMap, - fm2: &syntax_pos::FileMap) - -> bool { - if fm1.name != fm2.name { - return false; + for &(ref name, kind) in &self.sess.opts.libs { + register_native_lib(self.sess, self.cstore, None, name.clone(), kind); } + self.creader.register_statically_included_foreign_items(); + } - let lines1 = fm1.lines.borrow(); - let lines2 = fm2.lines.borrow(); - - if lines1.len() != lines2.len() { - return false; + fn process_item(&mut self, item: &ast::Item, definitions: &hir_map::Definitions) { + match item.node { + ast::ItemKind::ExternCrate(_) => {} + ast::ItemKind::ForeignMod(ref fm) => return self.process_foreign_mod(item, fm), + _ => return, } - for (&line1, &line2) in lines1.iter().zip(lines2.iter()) { - if (line1 - fm1.start_pos) != (line2 - fm2.start_pos) { - return false; + // If this `extern crate` item has `#[macro_use]` then we can safely skip it. + // These annotations were processed during macro expansion and are already loaded + // (if necessary) into our crate store. + // + // Note that it's important we *don't* fall through below as some `#[macro_use]` + // crates are explicitly not linked (e.g. macro crates) so we want to ensure + // we avoid `resolve_crate` with those. + if attr::contains_name(&item.attrs, "macro_use") { + if self.cstore.was_used_for_derive_macros(item) { + return } } - let multibytes1 = fm1.multibyte_chars.borrow(); - let multibytes2 = fm2.multibyte_chars.borrow(); + if let Some(info) = self.creader.extract_crate_info(item) { + if !info.should_link { + return; + } - if multibytes1.len() != multibytes2.len() { - return false; - } + let (cnum, ..) = self.creader.resolve_crate( + &None, &info.ident, &info.name, None, item.span, PathKind::Crate, true, + ); - for (mb1, mb2) in multibytes1.iter().zip(multibytes2.iter()) { - if (mb1.bytes != mb2.bytes) || - ((mb1.pos - fm1.start_pos) != (mb2.pos - fm2.start_pos)) { - return false; - } + let def_id = definitions.opt_local_def_id(item.id).unwrap(); + let len = definitions.def_path(def_id.index).data.len(); + + let extern_crate = + ExternCrate { def_id: def_id, span: item.span, direct: true, path_len: len }; + self.creader.update_extern_crate(cnum, extern_crate, &mut FnvHashSet()); + + self.cstore.add_extern_mod_stmt_cnum(info.id, cnum); } + } - true + fn load_macros(&mut self, extern_crate: &ast::Item, allows_macros: bool) -> Vec { + macro_import::load_macros(self, extern_crate, allows_macros) } } diff --git a/src/librustc_metadata/csearch.rs b/src/librustc_metadata/csearch.rs index 7ee6e54a66..1f25136ffe 100644 --- a/src/librustc_metadata/csearch.rs +++ b/src/librustc_metadata/csearch.rs @@ -9,28 +9,25 @@ // except according to those terms. 
use cstore; -use common; -use decoder; use encoder; use loader; +use schema; -use middle::cstore::{InlinedItem, CrateStore, CrateSource, ChildItem, ExternCrate, DefLike}; -use middle::cstore::{NativeLibraryKind, LinkMeta, LinkagePreference}; -use rustc::hir::def; -use middle::lang_items; -use rustc::ty::{self, Ty, TyCtxt, VariantKind}; -use rustc::hir::def_id::{DefId, DefIndex, CRATE_DEF_INDEX}; +use rustc::middle::cstore::{InlinedItem, CrateStore, CrateSource, ExternCrate}; +use rustc::middle::cstore::{NativeLibraryKind, LinkMeta, LinkagePreference}; +use rustc::hir::def::{self, Def}; +use rustc::middle::lang_items; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX}; use rustc::dep_graph::DepNode; use rustc::hir::map as hir_map; use rustc::hir::map::DefKey; use rustc::mir::repr::Mir; use rustc::mir::mir_map::MirMap; -use rustc::util::nodemap::{FnvHashMap, NodeSet, DefIdMap}; +use rustc::util::nodemap::{NodeSet, DefIdMap}; use rustc::session::config::PanicStrategy; -use std::cell::RefCell; -use std::rc::Rc; use std::path::PathBuf; use syntax::ast; use syntax::attr; @@ -40,255 +37,183 @@ use rustc_back::target::Target; use rustc::hir; impl<'tcx> CrateStore<'tcx> for cstore::CStore { + fn describe_def(&self, def: DefId) -> Option { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_def(def.index) + } + fn stability(&self, def: DefId) -> Option { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_stability(&cdata, def.index) + self.get_crate_data(def.krate).get_stability(def.index) } fn deprecation(&self, def: DefId) -> Option { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_deprecation(&cdata, def.index) + self.get_crate_data(def.krate).get_deprecation(def.index) } fn visibility(&self, def: DefId) -> ty::Visibility { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_visibility(&cdata, def.index) + self.get_crate_data(def.krate).get_visibility(def.index) } fn closure_kind(&self, def_id: DefId) -> ty::ClosureKind { assert!(!def_id.is_local()); self.dep_graph.read(DepNode::MetaData(def_id)); - let cdata = self.get_crate_data(def_id.krate); - decoder::closure_kind(&cdata, def_id.index) + self.get_crate_data(def_id.krate).closure_kind(def_id.index) } fn closure_ty<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> ty::ClosureTy<'tcx> { assert!(!def_id.is_local()); self.dep_graph.read(DepNode::MetaData(def_id)); - let cdata = self.get_crate_data(def_id.krate); - decoder::closure_ty(&cdata, def_id.index, tcx) + self.get_crate_data(def_id.krate).closure_ty(def_id.index, tcx) } - fn item_variances(&self, def: DefId) -> ty::ItemVariances { + fn item_variances(&self, def: DefId) -> Vec { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_item_variances(&cdata, def.index) - } - - fn repr_attrs(&self, def: DefId) -> Vec { - self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_repr_attrs(&cdata, def.index) + self.get_crate_data(def.krate).get_item_variances(def.index) } fn item_type<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) - -> ty::TypeScheme<'tcx> + -> Ty<'tcx> { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_type(&cdata, def.index, tcx) + 
self.get_crate_data(def.krate).get_type(def.index, tcx) } fn item_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::GenericPredicates<'tcx> { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_predicates(&cdata, def.index, tcx) + self.get_crate_data(def.krate).get_predicates(def.index, tcx) } fn item_super_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::GenericPredicates<'tcx> { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_super_predicates(&cdata, def.index, tcx) + self.get_crate_data(def.krate).get_super_predicates(def.index, tcx) + } + + fn item_generics<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> ty::Generics<'tcx> + { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_generics(def.index, tcx) } fn item_attrs(&self, def_id: DefId) -> Vec { self.dep_graph.read(DepNode::MetaData(def_id)); - let cdata = self.get_crate_data(def_id.krate); - decoder::get_item_attrs(&cdata, def_id.index) + self.get_crate_data(def_id.krate).get_item_attrs(def_id.index) } fn trait_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::TraitDef<'tcx> { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_trait_def(&cdata, def.index, tcx) + self.get_crate_data(def.krate).get_trait_def(def.index, tcx) } fn adt_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx> { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_adt_def(&cdata, def.index, tcx) + self.get_crate_data(def.krate).get_adt_def(def.index, tcx) } - fn method_arg_names(&self, did: DefId) -> Vec + fn fn_arg_names(&self, did: DefId) -> Vec { self.dep_graph.read(DepNode::MetaData(did)); - let cdata = self.get_crate_data(did.krate); - decoder::get_method_arg_names(&cdata, did.index) - } - - fn item_name(&self, def: DefId) -> ast::Name { - self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_item_name(&cdata, def.index) - } - - fn opt_item_name(&self, def: DefId) -> Option { - self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::maybe_get_item_name(&cdata, def.index) + self.get_crate_data(did.krate).get_fn_arg_names(did.index) } fn inherent_implementations_for_type(&self, def_id: DefId) -> Vec { self.dep_graph.read(DepNode::MetaData(def_id)); - let mut result = vec![]; - let cdata = self.get_crate_data(def_id.krate); - decoder::each_inherent_implementation_for_type(&cdata, def_id.index, - |iid| result.push(iid)); - result + self.get_crate_data(def_id.krate).get_inherent_implementations_for_type(def_id.index) } - fn implementations_of_trait(&self, def_id: DefId) -> Vec + fn implementations_of_trait(&self, filter: Option) -> Vec { - self.dep_graph.read(DepNode::MetaData(def_id)); + if let Some(def_id) = filter { + self.dep_graph.read(DepNode::MetaData(def_id)); + } let mut result = vec![]; self.iter_crate_data(|_, cdata| { - decoder::each_implementation_for_trait(cdata, def_id, &mut |iid| { - result.push(iid) - }) + cdata.get_implementations_for_trait(filter, &mut result) }); result } - fn provided_trait_methods<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) - -> Vec>> - { - self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_provided_trait_methods(&cdata, 
def.index, tcx) - } - - fn trait_item_def_ids(&self, def: DefId) - -> Vec - { - self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_trait_item_def_ids(&cdata, def.index) - } - - fn impl_items(&self, impl_def_id: DefId) -> Vec - { - self.dep_graph.read(DepNode::MetaData(impl_def_id)); - let cdata = self.get_crate_data(impl_def_id.krate); - decoder::get_impl_items(&cdata, impl_def_id.index) + fn impl_or_trait_items(&self, def_id: DefId) -> Vec { + self.dep_graph.read(DepNode::MetaData(def_id)); + let mut result = vec![]; + self.get_crate_data(def_id.krate) + .each_child_of_item(def_id.index, |child| result.push(child.def_id)); + result } - fn impl_polarity(&self, def: DefId) -> Option + fn impl_polarity(&self, def: DefId) -> hir::ImplPolarity { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_impl_polarity(&cdata, def.index) + self.get_crate_data(def.krate).get_impl_polarity(def.index) } fn impl_trait_ref<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> Option> { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_impl_trait(&cdata, def.index, tcx) + self.get_crate_data(def.krate).get_impl_trait(def.index, tcx) } fn custom_coerce_unsized_kind(&self, def: DefId) -> Option { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_custom_coerce_unsized_kind(&cdata, def.index) - } - - // FIXME: killme - fn associated_consts<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) - -> Vec>> { - self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_associated_consts(&cdata, def.index, tcx) + self.get_crate_data(def.krate).get_custom_coerce_unsized_kind(def.index) } fn impl_parent(&self, impl_def: DefId) -> Option { self.dep_graph.read(DepNode::MetaData(impl_def)); - let cdata = self.get_crate_data(impl_def.krate); - decoder::get_parent_impl(&*cdata, impl_def.index) + self.get_crate_data(impl_def.krate).get_parent_impl(impl_def.index) } - fn trait_of_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Option - { + fn trait_of_item(&self, def_id: DefId) -> Option { self.dep_graph.read(DepNode::MetaData(def_id)); - let cdata = self.get_crate_data(def_id.krate); - decoder::get_trait_of_item(&cdata, def_id.index, tcx) + self.get_crate_data(def_id.krate).get_trait_of_item(def_id.index) } fn impl_or_trait_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> Option> { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_impl_or_trait_item(&cdata, def.index, tcx) + self.get_crate_data(def.krate).get_impl_or_trait_item(def.index, tcx) } fn is_const_fn(&self, did: DefId) -> bool { self.dep_graph.read(DepNode::MetaData(did)); - let cdata = self.get_crate_data(did.krate); - decoder::is_const_fn(&cdata, did.index) + self.get_crate_data(did.krate).is_const_fn(did.index) } fn is_defaulted_trait(&self, trait_def_id: DefId) -> bool { self.dep_graph.read(DepNode::MetaData(trait_def_id)); - let cdata = self.get_crate_data(trait_def_id.krate); - decoder::is_defaulted_trait(&cdata, trait_def_id.index) - } - - fn is_impl(&self, did: DefId) -> bool - { - self.dep_graph.read(DepNode::MetaData(did)); - let cdata = self.get_crate_data(did.krate); - decoder::is_impl(&cdata, did.index) + self.get_crate_data(trait_def_id.krate).is_defaulted_trait(trait_def_id.index) } fn is_default_impl(&self, 
impl_did: DefId) -> bool { self.dep_graph.read(DepNode::MetaData(impl_did)); - let cdata = self.get_crate_data(impl_did.krate); - decoder::is_default_impl(&cdata, impl_did.index) + self.get_crate_data(impl_did.krate).is_default_impl(impl_did.index) } fn is_extern_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, did: DefId) -> bool { self.dep_graph.read(DepNode::MetaData(did)); - let cdata = self.get_crate_data(did.krate); - decoder::is_extern_item(&cdata, did.index, tcx) + self.get_crate_data(did.krate).is_extern_item(did.index, tcx) } fn is_foreign_item(&self, did: DefId) -> bool { - let cdata = self.get_crate_data(did.krate); - decoder::is_foreign_item(&cdata, did.index) - } - - fn is_static_method(&self, def: DefId) -> bool - { - self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::is_static_method(&cdata, def.index) + self.get_crate_data(did.krate).is_foreign_item(did.index) } fn is_statically_included_foreign_item(&self, id: ast::NodeId) -> bool @@ -296,121 +221,100 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { self.do_is_statically_included_foreign_item(id) } - fn is_typedef(&self, did: DefId) -> bool { - self.dep_graph.read(DepNode::MetaData(did)); - let cdata = self.get_crate_data(did.krate); - decoder::is_typedef(&cdata, did.index) - } - - fn dylib_dependency_formats(&self, cnum: ast::CrateNum) - -> Vec<(ast::CrateNum, LinkagePreference)> + fn dylib_dependency_formats(&self, cnum: CrateNum) + -> Vec<(CrateNum, LinkagePreference)> { - let cdata = self.get_crate_data(cnum); - decoder::get_dylib_dependency_formats(&cdata) + self.get_crate_data(cnum).get_dylib_dependency_formats() } - fn lang_items(&self, cnum: ast::CrateNum) -> Vec<(DefIndex, usize)> + fn lang_items(&self, cnum: CrateNum) -> Vec<(DefIndex, usize)> { - let mut result = vec![]; - let crate_data = self.get_crate_data(cnum); - decoder::each_lang_item(&crate_data, |did, lid| { - result.push((did, lid)); true - }); - result + self.get_crate_data(cnum).get_lang_items() } - fn missing_lang_items(&self, cnum: ast::CrateNum) + fn missing_lang_items(&self, cnum: CrateNum) -> Vec { - let cdata = self.get_crate_data(cnum); - decoder::get_missing_lang_items(&cdata) + self.get_crate_data(cnum).get_missing_lang_items() } - fn is_staged_api(&self, cnum: ast::CrateNum) -> bool + fn is_staged_api(&self, cnum: CrateNum) -> bool { - self.get_crate_data(cnum).staged_api + self.get_crate_data(cnum).is_staged_api() } - fn is_explicitly_linked(&self, cnum: ast::CrateNum) -> bool + fn is_explicitly_linked(&self, cnum: CrateNum) -> bool { self.get_crate_data(cnum).explicitly_linked.get() } - fn is_allocator(&self, cnum: ast::CrateNum) -> bool + fn is_allocator(&self, cnum: CrateNum) -> bool { self.get_crate_data(cnum).is_allocator() } - fn is_panic_runtime(&self, cnum: ast::CrateNum) -> bool + fn is_panic_runtime(&self, cnum: CrateNum) -> bool { self.get_crate_data(cnum).is_panic_runtime() } - fn panic_strategy(&self, cnum: ast::CrateNum) -> PanicStrategy { - self.get_crate_data(cnum).panic_strategy() + fn is_compiler_builtins(&self, cnum: CrateNum) -> bool { + self.get_crate_data(cnum).is_compiler_builtins() } - fn crate_attrs(&self, cnum: ast::CrateNum) -> Vec - { - decoder::get_crate_attributes(self.get_crate_data(cnum).data()) + fn panic_strategy(&self, cnum: CrateNum) -> PanicStrategy { + self.get_crate_data(cnum).panic_strategy() } - fn crate_name(&self, cnum: ast::CrateNum) -> token::InternedString + fn crate_name(&self, cnum: CrateNum) -> token::InternedString { 
token::intern_and_get_ident(&self.get_crate_data(cnum).name[..]) } - fn original_crate_name(&self, cnum: ast::CrateNum) -> token::InternedString + fn original_crate_name(&self, cnum: CrateNum) -> token::InternedString { token::intern_and_get_ident(&self.get_crate_data(cnum).name()) } - fn extern_crate(&self, cnum: ast::CrateNum) -> Option + fn extern_crate(&self, cnum: CrateNum) -> Option { self.get_crate_data(cnum).extern_crate.get() } - fn crate_hash(&self, cnum: ast::CrateNum) -> Svh + fn crate_hash(&self, cnum: CrateNum) -> Svh { - let cdata = self.get_crate_data(cnum); - decoder::get_crate_hash(cdata.data()) + self.get_crate_hash(cnum) } - fn crate_disambiguator(&self, cnum: ast::CrateNum) -> token::InternedString + fn crate_disambiguator(&self, cnum: CrateNum) -> token::InternedString { - let cdata = self.get_crate_data(cnum); - token::intern_and_get_ident(decoder::get_crate_disambiguator(cdata.data())) + token::intern_and_get_ident(&self.get_crate_data(cnum).disambiguator()) } - fn crate_struct_field_attrs(&self, cnum: ast::CrateNum) - -> FnvHashMap> + fn plugin_registrar_fn(&self, cnum: CrateNum) -> Option { - decoder::get_struct_field_attrs(&self.get_crate_data(cnum)) - } - - fn plugin_registrar_fn(&self, cnum: ast::CrateNum) -> Option - { - let cdata = self.get_crate_data(cnum); - decoder::get_plugin_registrar_fn(cdata.data()).map(|index| DefId { + self.get_crate_data(cnum).root.plugin_registrar_fn.map(|index| DefId { krate: cnum, index: index }) } - fn native_libraries(&self, cnum: ast::CrateNum) -> Vec<(NativeLibraryKind, String)> + fn native_libraries(&self, cnum: CrateNum) -> Vec<(NativeLibraryKind, String)> { - let cdata = self.get_crate_data(cnum); - decoder::get_native_libraries(&cdata) + self.get_crate_data(cnum).get_native_libraries() } - fn reachable_ids(&self, cnum: ast::CrateNum) -> Vec + fn reachable_ids(&self, cnum: CrateNum) -> Vec { - let cdata = self.get_crate_data(cnum); - decoder::get_reachable_ids(&cdata) + self.get_crate_data(cnum).get_reachable_ids() + } + + fn is_no_builtins(&self, cnum: CrateNum) -> bool { + self.get_crate_data(cnum).is_no_builtins() } fn def_index_for_def_key(&self, - cnum: ast::CrateNum, + cnum: CrateNum, def: DefKey) -> Option { let cdata = self.get_crate_data(cnum); @@ -421,64 +325,47 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { /// parent `DefId` as well as some idea of what kind of data the /// `DefId` refers to. fn def_key(&self, def: DefId) -> hir_map::DefKey { - self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::def_key(&cdata, def.index) + // Note: loading the def-key (or def-path) for a def-id is not + // a *read* of its metadata. This is because the def-id is + // really just an interned shorthand for a def-path, which is the + // canonical name for an item. 
+ // + // self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).def_key(def.index) } - fn relative_def_path(&self, def: DefId) -> hir_map::DefPath { - self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::def_path(&cdata, def.index) + fn relative_def_path(&self, def: DefId) -> Option { + // See `Note` above in `def_key()` for why this read is + // commented out: + // + // self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).def_path(def.index) } - fn variant_kind(&self, def_id: DefId) -> Option { + fn variant_kind(&self, def_id: DefId) -> Option + { self.dep_graph.read(DepNode::MetaData(def_id)); - let cdata = self.get_crate_data(def_id.krate); - decoder::get_variant_kind(&cdata, def_id.index) + self.get_crate_data(def_id.krate).get_variant_kind(def_id.index) } fn struct_ctor_def_id(&self, struct_def_id: DefId) -> Option { self.dep_graph.read(DepNode::MetaData(struct_def_id)); - let cdata = self.get_crate_data(struct_def_id.krate); - decoder::get_struct_ctor_def_id(&cdata, struct_def_id.index) - } - - fn tuple_struct_definition_if_ctor(&self, did: DefId) -> Option - { - self.dep_graph.read(DepNode::MetaData(did)); - let cdata = self.get_crate_data(did.krate); - decoder::get_tuple_struct_definition_if_ctor(&cdata, did.index) + self.get_crate_data(struct_def_id.krate).get_struct_ctor_def_id(struct_def_id.index) } fn struct_field_names(&self, def: DefId) -> Vec { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::get_struct_field_names(&cdata, def.index) + self.get_crate_data(def.krate).get_struct_field_names(def.index) } - fn item_children(&self, def_id: DefId) -> Vec + fn item_children(&self, def_id: DefId) -> Vec { self.dep_graph.read(DepNode::MetaData(def_id)); let mut result = vec![]; - let crate_data = self.get_crate_data(def_id.krate); - let get_crate_data = |cnum| self.get_crate_data(cnum); - decoder::each_child_of_item(&crate_data, def_id.index, get_crate_data, |def, name, vis| { - result.push(ChildItem { def: def, name: name, vis: vis }); - }); - result - } - - fn crate_top_level_items(&self, cnum: ast::CrateNum) -> Vec - { - let mut result = vec![]; - let crate_data = self.get_crate_data(cnum); - let get_crate_data = |cnum| self.get_crate_data(cnum); - decoder::each_top_level_item_of_crate(&crate_data, get_crate_data, |def, name, vis| { - result.push(ChildItem { def: def, name: name, vis: vis }); - }); + self.get_crate_data(def_id.krate) + .each_child_of_item(def_id.index, |child| result.push(child)); result } @@ -507,8 +394,7 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { debug!("maybe_get_item_ast({}): inlining item", tcx.item_path_str(def_id)); - let cdata = self.get_crate_data(def_id.krate); - let inlined = decoder::maybe_get_item_ast(&cdata, tcx, def_id.index); + let inlined = self.get_crate_data(def_id.krate).maybe_get_item_ast(tcx, def_id.index); let cache_inlined_item = |original_def_id, inlined_item_id, inlined_root_node_id| { let cache_entry = cstore::CachedInlinedItem { @@ -541,50 +427,17 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { }; match inlined { - decoder::FoundAst::NotFound => { + None => { self.inlined_item_cache .borrow_mut() .insert(def_id, None); } - decoder::FoundAst::Found(&InlinedItem::Item(d, ref item)) => { + Some(&InlinedItem::Item(d, ref item)) => { assert_eq!(d, def_id); let inlined_root_node_id = find_inlined_item_root(item.id); cache_inlined_item(def_id, item.id, inlined_root_node_id); } - 
decoder::FoundAst::Found(&InlinedItem::Foreign(d, ref item)) => { - assert_eq!(d, def_id); - let inlined_root_node_id = find_inlined_item_root(item.id); - cache_inlined_item(def_id, item.id, inlined_root_node_id); - } - decoder::FoundAst::FoundParent(parent_did, item) => { - let inlined_root_node_id = find_inlined_item_root(item.id); - cache_inlined_item(parent_did, item.id, inlined_root_node_id); - - match item.node { - hir::ItemEnum(ref ast_def, _) => { - let ast_vs = &ast_def.variants; - let ty_vs = &tcx.lookup_adt_def(parent_did).variants; - assert_eq!(ast_vs.len(), ty_vs.len()); - for (ast_v, ty_v) in ast_vs.iter().zip(ty_vs.iter()) { - cache_inlined_item(ty_v.did, - ast_v.node.data.id(), - inlined_root_node_id); - } - } - hir::ItemStruct(ref struct_def, _) => { - if struct_def.is_struct() { - bug!("instantiate_inline: called on a non-tuple struct") - } else { - cache_inlined_item(def_id, - struct_def.id(), - inlined_root_node_id); - } - } - _ => bug!("instantiate_inline: item has a \ - non-enum, non-struct parent") - } - } - decoder::FoundAst::Found(&InlinedItem::TraitItem(_, ref trait_item)) => { + Some(&InlinedItem::TraitItem(_, ref trait_item)) => { let inlined_root_node_id = find_inlined_item_root(trait_item.id); cache_inlined_item(def_id, trait_item.id, inlined_root_node_id); @@ -597,7 +450,7 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { tcx.impl_or_trait_items.borrow_mut() .insert(trait_item_def_id, ty_trait_item); } - decoder::FoundAst::Found(&InlinedItem::ImplItem(_, ref impl_item)) => { + Some(&InlinedItem::ImplItem(_, ref impl_item)) => { let inlined_root_node_id = find_inlined_item_root(impl_item.id); cache_inlined_item(def_id, impl_item.id, inlined_root_node_id); } @@ -629,17 +482,15 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { fn maybe_get_item_mir<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> Option> { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::maybe_get_item_mir(&cdata, tcx, def.index) + self.get_crate_data(def.krate).maybe_get_item_mir(tcx, def.index) } fn is_item_mir_available(&self, def: DefId) -> bool { self.dep_graph.read(DepNode::MetaData(def)); - let cdata = self.get_crate_data(def.krate); - decoder::is_item_mir_available(&cdata, def.index) + self.get_crate_data(def.krate).is_item_mir_available(def.index) } - fn crates(&self) -> Vec + fn crates(&self) -> Vec { let mut result = vec![]; self.iter_crate_data(|cnum, _| result.push(cnum)); @@ -665,26 +516,18 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { { loader::meta_section_name(target) } - fn encode_type<'a>(&self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>, - def_id_to_string: for<'b> fn(TyCtxt<'b, 'tcx, 'tcx>, DefId) -> String) - -> Vec - { - encoder::encoded_ty(tcx, ty, def_id_to_string) - } - fn used_crates(&self, prefer: LinkagePreference) -> Vec<(ast::CrateNum, Option)> + fn used_crates(&self, prefer: LinkagePreference) -> Vec<(CrateNum, Option)> { self.do_get_used_crates(prefer) } - fn used_crate_source(&self, cnum: ast::CrateNum) -> CrateSource + fn used_crate_source(&self, cnum: CrateNum) -> CrateSource { self.opt_used_crate_source(cnum).unwrap() } - fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option + fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option { self.do_extern_mod_stmt_cnum(emod_id) } @@ -693,26 +536,14 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { reexports: &def::ExportMap, link_meta: &LinkMeta, reachable: &NodeSet, - mir_map: &MirMap<'tcx>, - krate: &hir::Crate) -> Vec - { - 
let ecx = encoder::EncodeContext { - diag: tcx.sess.diagnostic(), - tcx: tcx, - reexports: reexports, - link_meta: link_meta, - cstore: self, - reachable: reachable, - mir_map: mir_map, - type_abbrevs: RefCell::new(FnvHashMap()), - }; - encoder::encode_metadata(ecx, krate) - + mir_map: &MirMap<'tcx>) -> Vec + { + encoder::encode_metadata(tcx, self, reexports, link_meta, reachable, mir_map) } fn metadata_encoding_version(&self) -> &[u8] { - common::metadata_encoding_version + schema::METADATA_HEADER } /// Returns a map from a sufficiently visible external item (i.e. an external item that is @@ -722,10 +553,9 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { let mut visible_parent_map = self.visible_parent_map.borrow_mut(); if !visible_parent_map.is_empty() { return visible_parent_map; } - use rustc::middle::cstore::ChildItem; use std::collections::vec_deque::VecDeque; use std::collections::hash_map::Entry; - for cnum in 1 .. self.next_crate_num() { + for cnum in (1 .. self.next_crate_num().as_usize()).map(CrateNum::new) { let cdata = self.get_crate_data(cnum); match cdata.extern_crate.get() { @@ -735,11 +565,12 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { } let mut bfs_queue = &mut VecDeque::new(); - let mut add_child = |bfs_queue: &mut VecDeque<_>, child: ChildItem, parent: DefId| { - let child = match child.def { - DefLike::DlDef(def) if child.vis == ty::Visibility::Public => def.def_id(), - _ => return, - }; + let mut add_child = |bfs_queue: &mut VecDeque<_>, child: def::Export, parent: DefId| { + let child = child.def_id; + + if self.visibility(child) != ty::Visibility::Public { + return; + } match visible_parent_map.entry(child) { Entry::Occupied(mut entry) => { @@ -756,10 +587,10 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { } }; - let croot = DefId { krate: cnum, index: CRATE_DEF_INDEX }; - for child in self.crate_top_level_items(cnum) { - add_child(bfs_queue, child, croot); - } + bfs_queue.push_back(DefId { + krate: cnum, + index: CRATE_DEF_INDEX + }); while let Some(def) = bfs_queue.pop_front() { for child in self.item_children(def) { add_child(bfs_queue, child, def); @@ -770,4 +601,3 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore { visible_parent_map } } - diff --git a/src/librustc_metadata/cstore.rs b/src/librustc_metadata/cstore.rs index d786cc5ba0..0a1ff70a04 100644 --- a/src/librustc_metadata/cstore.rs +++ b/src/librustc_metadata/cstore.rs @@ -8,54 +8,46 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
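The visible_parent_map construction in the csearch.rs hunk above walks item children breadth-first from each crate root, so the first parent recorded for a public item lies on a shortest visible path to it. A standalone sketch of that idea follows; the DefId alias and the child table are invented stand-ins, not rustc's types.

// Standalone sketch of the breadth-first visible-parent construction.
use std::collections::{HashMap, VecDeque};

type DefId = u32;

fn visible_parents(root: DefId, children: &HashMap<DefId, Vec<DefId>>) -> HashMap<DefId, DefId> {
    let mut parent_of = HashMap::new();
    let mut queue = VecDeque::new();
    queue.push_back(root);

    while let Some(def) = queue.pop_front() {
        if let Some(kids) = children.get(&def) {
            for &child in kids {
                // The real code uses the Entry API; a contains_key check
                // expresses the same "first writer wins" rule.
                if !parent_of.contains_key(&child) {
                    parent_of.insert(child, def);
                    queue.push_back(child); // only newly-seen items are explored
                }
            }
        }
    }
    parent_of
}

fn main() {
    // 0 is the crate root; item 3 is reachable both directly via 1 and via
    // the deeper path 0 -> 2 -> 4 -> 3, so BFS records parent 1 for it.
    let mut children = HashMap::new();
    children.insert(0, vec![1, 2]);
    children.insert(1, vec![3]);
    children.insert(2, vec![4]);
    children.insert(4, vec![3]);

    let parents = visible_parents(0, &children);
    assert_eq!(parents[&3], 1);
    println!("{:?}", parents);
}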
-#![allow(non_camel_case_types)] - // The crate store - a central repo for information collected about external // crates and libraries -pub use self::MetadataBlob::*; - -use common; -use creader; -use decoder; -use index; use loader; +use schema; use rustc::dep_graph::DepGraph; -use rustc::hir::def_id::{DefIndex, DefId}; +use rustc::hir::def_id::{CRATE_DEF_INDEX, CrateNum, DefIndex, DefId}; use rustc::hir::map::DefKey; use rustc::hir::svh::Svh; use rustc::middle::cstore::ExternCrate; use rustc::session::config::PanicStrategy; use rustc_data_structures::indexed_vec::IndexVec; -use rustc::util::nodemap::{FnvHashMap, NodeMap, NodeSet, DefIdMap}; +use rustc::util::nodemap::{FnvHashMap, NodeMap, NodeSet, DefIdMap, FnvHashSet}; -use std::cell::{RefCell, Ref, Cell}; +use std::cell::{RefCell, Cell}; use std::rc::Rc; use std::path::PathBuf; use flate::Bytes; -use syntax::ast; +use syntax::ast::{self, Ident}; use syntax::attr; -use syntax::codemap; use syntax_pos; -pub use middle::cstore::{NativeLibraryKind, LinkagePreference}; -pub use middle::cstore::{NativeStatic, NativeFramework, NativeUnknown}; -pub use middle::cstore::{CrateSource, LinkMeta}; +pub use rustc::middle::cstore::{NativeLibraryKind, LinkagePreference}; +pub use rustc::middle::cstore::{NativeStatic, NativeFramework, NativeUnknown}; +pub use rustc::middle::cstore::{CrateSource, LinkMeta}; // A map from external crate numbers (as decoded from some crate file) to // local crate numbers (as generated during this session). Each external // crate may refer to types in other external crates, and each has their // own crate numbers. -pub type CrateNumMap = IndexVec; +pub type CrateNumMap = IndexVec; pub enum MetadataBlob { - MetadataVec(Bytes), - MetadataArchive(loader::ArchiveMetadata), + Inflated(Bytes), + Archive(loader::ArchiveMetadata), } /// Holds information about a syntax_pos::FileMap imported from another crate. -/// See creader::import_codemap() for more information. +/// See `imported_filemaps()` for more information. pub struct ImportedFileMap { /// This FileMap's byte-offset within the codemap of its original crate pub original_start_pos: syntax_pos::BytePos, @@ -73,14 +65,12 @@ pub struct CrateMetadata { /// (e.g., by the allocator) pub extern_crate: Cell>, - pub data: MetadataBlob, + pub blob: MetadataBlob, pub cnum_map: RefCell, - pub cnum: ast::CrateNum, + pub cnum: CrateNum, pub codemap_import_info: RefCell>, - pub staged_api: bool, - pub index: index::Index, - pub xref_index: index::DenseIndex, + pub root: schema::CrateRoot, /// For each public item in this crate, we encode a key. 
When the /// crate is loaded, we read all the keys and put them in this @@ -105,9 +95,9 @@ pub struct CachedInlinedItem { pub struct CStore { pub dep_graph: DepGraph, - metas: RefCell>>, + metas: RefCell>>, /// Map from NodeId's of local extern crate statements to crate numbers - extern_mod_crate_map: RefCell>, + extern_mod_crate_map: RefCell>, used_crate_sources: RefCell>, used_libraries: RefCell>, used_link_args: RefCell>, @@ -115,6 +105,7 @@ pub struct CStore { pub inlined_item_cache: RefCell>>, pub defid_for_inlined_node: RefCell>, pub visible_parent_map: RefCell>, + pub used_for_derive_macro: RefCell>, } impl CStore { @@ -130,28 +121,28 @@ impl CStore { visible_parent_map: RefCell::new(FnvHashMap()), inlined_item_cache: RefCell::new(FnvHashMap()), defid_for_inlined_node: RefCell::new(FnvHashMap()), + used_for_derive_macro: RefCell::new(FnvHashSet()), } } - pub fn next_crate_num(&self) -> ast::CrateNum { - self.metas.borrow().len() as ast::CrateNum + 1 + pub fn next_crate_num(&self) -> CrateNum { + CrateNum::new(self.metas.borrow().len() + 1) } - pub fn get_crate_data(&self, cnum: ast::CrateNum) -> Rc { + pub fn get_crate_data(&self, cnum: CrateNum) -> Rc { self.metas.borrow().get(&cnum).unwrap().clone() } - pub fn get_crate_hash(&self, cnum: ast::CrateNum) -> Svh { - let cdata = self.get_crate_data(cnum); - decoder::get_crate_hash(cdata.data()) + pub fn get_crate_hash(&self, cnum: CrateNum) -> Svh { + self.get_crate_data(cnum).hash() } - pub fn set_crate_data(&self, cnum: ast::CrateNum, data: Rc) { + pub fn set_crate_data(&self, cnum: CrateNum, data: Rc) { self.metas.borrow_mut().insert(cnum, data); } pub fn iter_crate_data(&self, mut i: I) where - I: FnMut(ast::CrateNum, &Rc), + I: FnMut(CrateNum, &Rc), { for (&k, v) in self.metas.borrow().iter() { i(k, v); @@ -160,7 +151,7 @@ impl CStore { /// Like `iter_crate_data`, but passes source paths (if available) as well. pub fn iter_crate_data_origins(&self, mut i: I) where - I: FnMut(ast::CrateNum, &CrateMetadata, Option), + I: FnMut(CrateNum, &CrateMetadata, Option), { for (&k, v) in self.metas.borrow().iter() { let origin = self.opt_used_crate_source(k); @@ -176,7 +167,7 @@ impl CStore { } } - pub fn opt_used_crate_source(&self, cnum: ast::CrateNum) + pub fn opt_used_crate_source(&self, cnum: CrateNum) -> Option { self.used_crate_sources.borrow_mut() .iter().find(|source| source.cnum == cnum).cloned() @@ -191,7 +182,7 @@ impl CStore { self.statically_included_foreign_items.borrow_mut().clear(); } - pub fn crate_dependencies_in_rpo(&self, krate: ast::CrateNum) -> Vec + pub fn crate_dependencies_in_rpo(&self, krate: CrateNum) -> Vec { let mut ordering = Vec::new(); self.push_dependencies_in_postorder(&mut ordering, krate); @@ -200,8 +191,8 @@ impl CStore { } pub fn push_dependencies_in_postorder(&self, - ordering: &mut Vec, - krate: ast::CrateNum) + ordering: &mut Vec, + krate: CrateNum) { if ordering.contains(&krate) { return } @@ -225,7 +216,7 @@ impl CStore { // topological sort of all crates putting the leaves at the right-most // positions. 
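A self-contained sketch of the post-order dependency walk used by push_dependencies_in_postorder and do_get_used_crates here: a crate is emitted only after all of its dependencies, and crates already present in the ordering are skipped. The CrateNum alias and the dependency table are stand-ins, not rustc types.

// Standalone sketch of a post-order crate-dependency walk.
use std::collections::HashMap;

type CrateNum = u32;

fn push_postorder(ordering: &mut Vec<CrateNum>,
                  krate: CrateNum,
                  deps: &HashMap<CrateNum, Vec<CrateNum>>) {
    if ordering.contains(&krate) {
        return; // already emitted via another path
    }
    if let Some(ds) = deps.get(&krate) {
        for &dep in ds {
            push_postorder(ordering, dep, deps); // dependencies come first
        }
    }
    ordering.push(krate);
}

fn main() {
    // 1 depends on 2 and 3; 2 depends on 3; 3 is a leaf.
    let mut deps = HashMap::new();
    deps.insert(1, vec![2, 3]);
    deps.insert(2, vec![3]);

    let mut ordering = Vec::new();
    for krate in [1, 2, 3].iter().cloned() {
        push_postorder(&mut ordering, krate, &deps);
    }
    assert_eq!(ordering, vec![3, 2, 1]);
    println!("{:?}", ordering);
}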
pub fn do_get_used_crates(&self, prefer: LinkagePreference) - -> Vec<(ast::CrateNum, Option)> { + -> Vec<(CrateNum, Option)> { let mut ordering = Vec::new(); for (&num, _) in self.metas.borrow().iter() { self.push_dependencies_in_postorder(&mut ordering, num); @@ -270,7 +261,7 @@ impl CStore { pub fn add_extern_mod_stmt_cnum(&self, emod_id: ast::NodeId, - cnum: ast::CrateNum) { + cnum: CrateNum) { self.extern_mod_crate_map.borrow_mut().insert(emod_id, cnum); } @@ -282,82 +273,62 @@ impl CStore { self.statically_included_foreign_items.borrow().contains(&id) } - pub fn do_extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option + pub fn do_extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option { self.extern_mod_crate_map.borrow().get(&emod_id).cloned() } + + pub fn was_used_for_derive_macros(&self, i: &ast::Item) -> bool { + self.used_for_derive_macro.borrow().contains(&i.ident) + } + + pub fn add_used_for_derive_macros(&self, i: &ast::Item) { + self.used_for_derive_macro.borrow_mut().insert(i.ident); + } } impl CrateMetadata { - pub fn data<'a>(&'a self) -> &'a [u8] { self.data.as_slice() } - pub fn name(&self) -> &str { decoder::get_crate_name(self.data()) } - pub fn hash(&self) -> Svh { decoder::get_crate_hash(self.data()) } - pub fn disambiguator(&self) -> &str { - decoder::get_crate_disambiguator(self.data()) - } - pub fn imported_filemaps<'a>(&'a self, codemap: &codemap::CodeMap) - -> Ref<'a, Vec> { - let filemaps = self.codemap_import_info.borrow(); - if filemaps.is_empty() { - drop(filemaps); - let filemaps = creader::import_codemap(codemap, &self.data); - - // This shouldn't borrow twice, but there is no way to downgrade RefMut to Ref. - *self.codemap_import_info.borrow_mut() = filemaps; - self.codemap_import_info.borrow() - } else { - filemaps - } + pub fn name(&self) -> &str { &self.root.name } + pub fn hash(&self) -> Svh { self.root.hash } + pub fn disambiguator(&self) -> &str { &self.root.disambiguator } + + pub fn is_staged_api(&self) -> bool { + self.get_item_attrs(CRATE_DEF_INDEX).iter().any(|attr| { + attr.name() == "stable" || attr.name() == "unstable" + }) } pub fn is_allocator(&self) -> bool { - let attrs = decoder::get_crate_attributes(self.data()); + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "allocator") } pub fn needs_allocator(&self) -> bool { - let attrs = decoder::get_crate_attributes(self.data()); + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "needs_allocator") } pub fn is_panic_runtime(&self) -> bool { - let attrs = decoder::get_crate_attributes(self.data()); + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "panic_runtime") } pub fn needs_panic_runtime(&self) -> bool { - let attrs = decoder::get_crate_attributes(self.data()); + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "needs_panic_runtime") } - pub fn panic_strategy(&self) -> PanicStrategy { - decoder::get_panic_strategy(self.data()) + pub fn is_compiler_builtins(&self) -> bool { + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); + attr::contains_name(&attrs, "compiler_builtins") } -} -impl MetadataBlob { - pub fn as_slice_raw<'a>(&'a self) -> &'a [u8] { - match *self { - MetadataVec(ref vec) => &vec[..], - MetadataArchive(ref ar) => ar.as_slice(), - } + pub fn is_no_builtins(&self) -> bool { + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); + attr::contains_name(&attrs, "no_builtins") } - pub fn as_slice<'a>(&'a self) -> &'a [u8] { - let slice = self.as_slice_raw(); 
- let len_offset = 4 + common::metadata_encoding_version.len(); - if slice.len() < len_offset+4 { - &[] // corrupt metadata - } else { - let len = (((slice[len_offset+0] as u32) << 24) | - ((slice[len_offset+1] as u32) << 16) | - ((slice[len_offset+2] as u32) << 8) | - ((slice[len_offset+3] as u32) << 0)) as usize; - if len <= slice.len() - 4 - len_offset { - &slice[len_offset + 4..len_offset + len + 4] - } else { - &[] // corrupt or old metadata - } - } + pub fn panic_strategy(&self) -> PanicStrategy { + self.root.panic_strategy.clone() } } diff --git a/src/librustc_metadata/decoder.rs b/src/librustc_metadata/decoder.rs index 64b614b56e..ceb6fcb0da 100644 --- a/src/librustc_metadata/decoder.rs +++ b/src/librustc_metadata/decoder.rs @@ -10,1709 +10,1136 @@ // Decoding metadata from a single crate's metadata -#![allow(non_camel_case_types)] - -use self::Family::*; - use astencode::decode_inlined_item; -use cstore::{self, CrateMetadata}; -use common::*; -use def_key; -use encoder::def_to_u64; -use index; -use tls_context; -use tydecode::TyDecoder; - -use rustc::hir::svh::Svh; +use cstore::{self, CrateMetadata, MetadataBlob, NativeLibraryKind}; +use index::Index; +use schema::*; + use rustc::hir::map as hir_map; -use rustc::hir::map::DefKey; +use rustc::hir::map::{DefKey, DefPathData}; use rustc::util::nodemap::FnvHashMap; use rustc::hir; -use rustc::session::config::PanicStrategy; +use rustc::hir::intravisit::IdRange; -use middle::cstore::{InlinedItem, LinkagePreference}; -use middle::cstore::{DefLike, DlDef, DlField, DlImpl, tls}; -use rustc::hir::def::Def; -use rustc::hir::def_id::{DefId, DefIndex}; -use middle::lang_items; -use rustc::ty::subst; -use rustc::ty::{ImplContainer, TraitContainer}; -use rustc::ty::{self, Ty, TyCtxt, TypeFoldable, VariantKind}; +use rustc::middle::cstore::{InlinedItem, LinkagePreference}; +use rustc::hir::def::{self, Def}; +use rustc::hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE}; +use rustc::middle::lang_items; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::subst::Substs; use rustc_const_math::ConstInt; -use rustc::mir; -use rustc::mir::visit::MutVisitor; +use rustc::mir::repr::Mir; -use std::cell::Cell; +use std::cell::Ref; use std::io; +use std::mem; use std::rc::Rc; use std::str; +use std::u32; -use rbml::reader; -use rbml; -use rustc_serialize::Decodable; +use rustc_serialize::{Decodable, Decoder, SpecializedDecoder, opaque}; use syntax::attr; -use syntax::parse::token; -use syntax::ast; +use syntax::ast::{self, NodeId}; use syntax::codemap; -use syntax::print::pprust; -use syntax::ptr::P; -use syntax_pos::{self, Span, BytePos, NO_EXPANSION}; +use syntax_pos::{self, Span, BytePos, Pos}; -pub type Cmd<'a> = &'a CrateMetadata; +pub struct DecodeContext<'a, 'tcx: 'a> { + opaque: opaque::Decoder<'a>, + tcx: Option>, + cdata: Option<&'a CrateMetadata>, + from_id_range: IdRange, + to_id_range: IdRange, -impl CrateMetadata { - fn get_item(&self, item_id: DefIndex) -> Option { - self.index.lookup_item(self.data(), item_id).map(|pos| { - reader::doc_at(self.data(), pos as usize).unwrap().doc - }) - } + // Cache the last used filemap for translating spans as an optimization. 
+ last_filemap_index: usize, - fn lookup_item(&self, item_id: DefIndex) -> rbml::Doc { - match self.get_item(item_id) { - None => bug!("lookup_item: id not found: {:?} in crate {:?} with number {}", - item_id, - self.name, - self.cnum), - Some(d) => d - } - } + lazy_state: LazyState } -pub fn load_index(data: &[u8]) -> index::Index { - let index = reader::get_doc(rbml::Doc::new(data), tag_index); - index::Index::from_rbml(index) -} +/// Abstract over the various ways one can create metadata decoders. +pub trait Metadata<'a, 'tcx>: Copy { + fn raw_bytes(self) -> &'a [u8]; + fn cdata(self) -> Option<&'a CrateMetadata> { None } + fn tcx(self) -> Option> { None } -pub fn crate_rustc_version(data: &[u8]) -> Option { - let doc = rbml::Doc::new(data); - reader::maybe_get_doc(doc, tag_rustc_version).map(|s| s.as_str()) + fn decoder(self, pos: usize) -> DecodeContext<'a, 'tcx> { + let id_range = IdRange { + min: NodeId::from_u32(u32::MIN), + max: NodeId::from_u32(u32::MAX) + }; + DecodeContext { + opaque: opaque::Decoder::new(self.raw_bytes(), pos), + cdata: self.cdata(), + tcx: self.tcx(), + from_id_range: id_range, + to_id_range: id_range, + last_filemap_index: 0, + lazy_state: LazyState::NoNode + } + } } -pub fn load_xrefs(data: &[u8]) -> index::DenseIndex { - let index = reader::get_doc(rbml::Doc::new(data), tag_xref_index); - index::DenseIndex::from_buf(index.data, index.start, index.end) +impl<'a, 'tcx> Metadata<'a, 'tcx> for &'a MetadataBlob { + fn raw_bytes(self) -> &'a [u8] { + match *self { + MetadataBlob::Inflated(ref vec) => &vec[..], + MetadataBlob::Archive(ref ar) => ar.as_slice(), + } + } } -// Go through each item in the metadata and create a map from that -// item's def-key to the item's DefIndex. -pub fn load_key_map(data: &[u8]) -> FnvHashMap { - let root_doc = rbml::Doc::new(data); - let items_doc = reader::get_doc(root_doc, tag_items); - let items_data_doc = reader::get_doc(items_doc, tag_items_data); - reader::docs(items_data_doc) - .filter(|&(tag, _)| tag == tag_items_data_item) - .map(|(_, item_doc)| { - // load def-key from item - let key = item_def_key(item_doc); - - // load def-index from item; we only encode the full def-id, - // so just pull out the index - let def_id_doc = reader::get_doc(item_doc, tag_def_id); - let def_id = untranslated_def_id(def_id_doc); - assert!(def_id.is_local()); // local to the crate we are decoding, that is - - (key, def_id.index) - }) - .collect() +impl<'a, 'tcx> Metadata<'a, 'tcx> for &'a CrateMetadata { + fn raw_bytes(self) -> &'a [u8] { self.blob.raw_bytes() } + fn cdata(self) -> Option<&'a CrateMetadata> { Some(self) } } -#[derive(Clone, Copy, Debug, PartialEq)] -enum Family { - ImmStatic, // c - MutStatic, // b - Fn, // f - StaticMethod, // F - Method, // h - Type, // y - Mod, // m - ForeignMod, // n - Enum, // t - Variant(VariantKind), // V, v, w - Impl, // i - DefaultImpl, // d - Trait, // I - Struct(VariantKind), // S, s, u - PublicField, // g - InheritedField, // N - Constant, // C +impl<'a, 'tcx> Metadata<'a, 'tcx> for (&'a CrateMetadata, TyCtxt<'a, 'tcx, 'tcx>) { + fn raw_bytes(self) -> &'a [u8] { self.0.raw_bytes() } + fn cdata(self) -> Option<&'a CrateMetadata> { Some(self.0) } + fn tcx(self) -> Option> { Some(self.1) } } -fn item_family(item: rbml::Doc) -> Family { - let fam = reader::get_doc(item, tag_items_data_item_family); - match reader::doc_as_u8(fam) as char { - 'C' => Constant, - 'c' => ImmStatic, - 'b' => MutStatic, - 'f' => Fn, - 'F' => StaticMethod, - 'h' => Method, - 'y' => Type, - 'm' => Mod, - 'n' => ForeignMod, - 
't' => Enum, - 'V' => Variant(VariantKind::Struct), - 'v' => Variant(VariantKind::Tuple), - 'w' => Variant(VariantKind::Unit), - 'i' => Impl, - 'd' => DefaultImpl, - 'I' => Trait, - 'S' => Struct(VariantKind::Struct), - 's' => Struct(VariantKind::Tuple), - 'u' => Struct(VariantKind::Unit), - 'g' => PublicField, - 'N' => InheritedField, - c => bug!("unexpected family char: {}", c) - } -} +// HACK(eddyb) Only used by astencode to customize the from/to IdRange's. +impl<'a, 'tcx> Metadata<'a, 'tcx> for (&'a CrateMetadata, TyCtxt<'a, 'tcx, 'tcx>, [IdRange; 2]) { + fn raw_bytes(self) -> &'a [u8] { self.0.raw_bytes() } + fn cdata(self) -> Option<&'a CrateMetadata> { Some(self.0) } + fn tcx(self) -> Option> { Some(self.1) } -fn item_visibility(item: rbml::Doc) -> ty::Visibility { - match reader::maybe_get_doc(item, tag_items_data_item_visibility) { - None => ty::Visibility::Public, - Some(visibility_doc) => { - match reader::doc_as_u8(visibility_doc) as char { - 'y' => ty::Visibility::Public, - 'i' => ty::Visibility::PrivateExternal, - _ => bug!("unknown visibility character") - } - } + fn decoder(self, pos: usize) -> DecodeContext<'a, 'tcx> { + let mut dcx = (self.0, self.1).decoder(pos); + dcx.from_id_range = self.2[0]; + dcx.to_id_range = self.2[1]; + dcx } } -fn fn_constness(item: rbml::Doc) -> hir::Constness { - match reader::maybe_get_doc(item, tag_items_data_item_constness) { - None => hir::Constness::NotConst, - Some(constness_doc) => { - match reader::doc_as_u8(constness_doc) as char { - 'c' => hir::Constness::Const, - 'n' => hir::Constness::NotConst, - _ => bug!("unknown constness character") - } - } +impl<'a, 'tcx: 'a, T: Decodable> Lazy { + pub fn decode>(self, meta: M) -> T { + let mut dcx = meta.decoder(self.position); + dcx.lazy_state = LazyState::NodeStart(self.position); + T::decode(&mut dcx).unwrap() } } -fn item_defaultness(item: rbml::Doc) -> hir::Defaultness { - match reader::maybe_get_doc(item, tag_items_data_item_defaultness) { - None => hir::Defaultness::Default, // should occur only for default impls on traits - Some(defaultness_doc) => { - match reader::doc_as_u8(defaultness_doc) as char { - 'd' => hir::Defaultness::Default, - 'f' => hir::Defaultness::Final, - _ => bug!("unknown defaultness character") - } - } +impl<'a, 'tcx: 'a, T: Decodable> LazySeq { + pub fn decode>(self, meta: M) -> impl Iterator + 'a { + let mut dcx = meta.decoder(self.position); + dcx.lazy_state = LazyState::NodeStart(self.position); + (0..self.len).map(move |_| { + T::decode(&mut dcx).unwrap() + }) } } -fn item_sort(item: rbml::Doc) -> Option { - reader::tagged_docs(item, tag_item_trait_item_sort).nth(0).map(|doc| { - doc.as_str_slice().as_bytes()[0] as char - }) -} +impl<'a, 'tcx> DecodeContext<'a, 'tcx> { + pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.tcx.expect("missing TyCtxt in DecodeContext") + } -fn untranslated_def_id(d: rbml::Doc) -> DefId { - let id = reader::doc_as_u64(d); - let index = DefIndex::new((id & 0xFFFF_FFFF) as usize); - DefId { krate: (id >> 32) as u32, index: index } -} + pub fn cdata(&self) -> &'a CrateMetadata { + self.cdata.expect("missing CrateMetadata in DecodeContext") + } -fn translated_def_id(cdata: Cmd, d: rbml::Doc) -> DefId { - let def_id = untranslated_def_id(d); - translate_def_id(cdata, def_id) -} + fn with_position R, R>(&mut self, pos: usize, f: F) -> R { + let new_opaque = opaque::Decoder::new(self.opaque.data, pos); + let old_opaque = mem::replace(&mut self.opaque, new_opaque); + let old_state = mem::replace(&mut self.lazy_state, 
LazyState::NoNode); + let r = f(self); + self.opaque = old_opaque; + self.lazy_state = old_state; + r + } -fn item_parent_item(cdata: Cmd, d: rbml::Doc) -> Option { - reader::tagged_docs(d, tag_items_data_parent_item).nth(0).map(|did| { - translated_def_id(cdata, did) - }) + fn read_lazy_distance(&mut self, min_size: usize) + -> Result::Error> { + let distance = self.read_usize()?; + let position = match self.lazy_state { + LazyState::NoNode => { + bug!("read_lazy_distance: outside of a metadata node") + } + LazyState::NodeStart(start) => { + assert!(distance + min_size <= start); + start - distance - min_size + } + LazyState::Previous(last_min_end) => { + last_min_end + distance + } + }; + self.lazy_state = LazyState::Previous(position + min_size); + Ok(position) + } } -fn item_require_parent_item(cdata: Cmd, d: rbml::Doc) -> DefId { - translated_def_id(cdata, reader::get_doc(d, tag_items_data_parent_item)) +macro_rules! decoder_methods { + ($($name:ident -> $ty:ty;)*) => { + $(fn $name(&mut self) -> Result<$ty, Self::Error> { + self.opaque.$name() + })* + } } -fn item_def_id(d: rbml::Doc, cdata: Cmd) -> DefId { - translated_def_id(cdata, reader::get_doc(d, tag_def_id)) -} +impl<'doc, 'tcx> Decoder for DecodeContext<'doc, 'tcx> { + type Error = as Decoder>::Error; -fn reexports<'a>(d: rbml::Doc<'a>) -> reader::TaggedDocsIterator<'a> { - reader::tagged_docs(d, tag_items_data_item_reexport) -} + decoder_methods! { + read_nil -> (); -fn variant_disr_val(d: rbml::Doc) -> Option { - reader::maybe_get_doc(d, tag_disr_val).and_then(|val_doc| { - reader::with_doc_data(val_doc, |data| { - str::from_utf8(data).ok().and_then(|s| s.parse().ok()) - }) - }) -} + read_u64 -> u64; + read_u32 -> u32; + read_u16 -> u16; + read_u8 -> u8; + read_usize -> usize; -fn doc_type<'a, 'tcx>(doc: rbml::Doc, tcx: TyCtxt<'a, 'tcx, 'tcx>, cdata: Cmd) -> Ty<'tcx> { - let tp = reader::get_doc(doc, tag_items_data_item_type); - TyDecoder::with_doc(tcx, cdata.cnum, tp, - &mut |did| translate_def_id(cdata, did)) - .parse_ty() -} + read_i64 -> i64; + read_i32 -> i32; + read_i16 -> i16; + read_i8 -> i8; + read_isize -> isize; -fn maybe_doc_type<'a, 'tcx>(doc: rbml::Doc, tcx: TyCtxt<'a, 'tcx, 'tcx>, cdata: Cmd) - -> Option> { - reader::maybe_get_doc(doc, tag_items_data_item_type).map(|tp| { - TyDecoder::with_doc(tcx, cdata.cnum, tp, - &mut |did| translate_def_id(cdata, did)) - .parse_ty() - }) -} + read_bool -> bool; + read_f64 -> f64; + read_f32 -> f32; + read_char -> char; + read_str -> String; + } -pub fn item_type<'a, 'tcx>(_item_id: DefId, item: rbml::Doc, - tcx: TyCtxt<'a, 'tcx, 'tcx>, cdata: Cmd) -> Ty<'tcx> { - doc_type(item, tcx, cdata) + fn error(&mut self, err: &str) -> Self::Error { + self.opaque.error(err) + } } -fn doc_trait_ref<'a, 'tcx>(doc: rbml::Doc, tcx: TyCtxt<'a, 'tcx, 'tcx>, cdata: Cmd) - -> ty::TraitRef<'tcx> { - TyDecoder::with_doc(tcx, cdata.cnum, doc, - &mut |did| translate_def_id(cdata, did)) - .parse_trait_ref() +impl<'a, 'tcx, T> SpecializedDecoder> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result, Self::Error> { + Ok(Lazy::with_position(self.read_lazy_distance(Lazy::::min_size())?)) + } } -fn item_trait_ref<'a, 'tcx>(doc: rbml::Doc, tcx: TyCtxt<'a, 'tcx, 'tcx>, cdata: Cmd) - -> ty::TraitRef<'tcx> { - let tp = reader::get_doc(doc, tag_item_trait_ref); - doc_trait_ref(tp, tcx, cdata) +impl<'a, 'tcx, T> SpecializedDecoder> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result, Self::Error> { + let len = self.read_usize()?; + let position = if len == 0 { + 
0 + } else { + self.read_lazy_distance(LazySeq::::min_size(len))? + }; + Ok(LazySeq::with_position_and_length(position, len)) + } } -fn item_name(item: rbml::Doc) -> ast::Name { - maybe_item_name(item).expect("no item in item_name") -} +impl<'a, 'tcx> SpecializedDecoder for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result { + let id = u32::decode(self)?; -fn maybe_item_name(item: rbml::Doc) -> Option { - reader::maybe_get_doc(item, tag_paths_data_name).map(|name| { - let string = name.as_str_slice(); - token::intern(string) - }) -} + // from_id_range should be non-empty + assert!(!self.from_id_range.empty()); + // Make sure that translating the NodeId will actually yield a + // meaningful result + if !self.from_id_range.contains(NodeId::from_u32(id)) { + bug!("NodeId::decode: {} out of DecodeContext range ({:?} -> {:?})", + id, self.from_id_range, self.to_id_range); + } -fn family_to_variant_kind<'tcx>(family: Family) -> Option { - match family { - Struct(VariantKind::Struct) | Variant(VariantKind::Struct) => - Some(ty::VariantKind::Struct), - Struct(VariantKind::Tuple) | Variant(VariantKind::Tuple) => - Some(ty::VariantKind::Tuple), - Struct(VariantKind::Unit) | Variant(VariantKind::Unit) => - Some(ty::VariantKind::Unit), - _ => None, + // Use wrapping arithmetic because otherwise it introduces control flow. + // Maybe we should just have the control flow? -- aatch + Ok(NodeId::from_u32(id.wrapping_sub(self.from_id_range.min.as_u32()) + .wrapping_add(self.to_id_range.min.as_u32()))) } } -fn item_to_def_like(cdata: Cmd, item: rbml::Doc, did: DefId) -> DefLike { - let fam = item_family(item); - match fam { - Constant => { - // Check whether we have an associated const item. - match item_sort(item) { - Some('C') | Some('c') => { - DlDef(Def::AssociatedConst(did)) - } - _ => { - // Regular const item. - DlDef(Def::Const(did)) - } - } - } - ImmStatic => DlDef(Def::Static(did, false)), - MutStatic => DlDef(Def::Static(did, true)), - Struct(..) => DlDef(Def::Struct(did)), - Fn => DlDef(Def::Fn(did)), - Method | StaticMethod => { - DlDef(Def::Method(did)) - } - Type => { - if item_sort(item) == Some('t') { - let trait_did = item_require_parent_item(cdata, item); - DlDef(Def::AssociatedTy(trait_did, did)) - } else { - DlDef(Def::TyAlias(did)) - } - } - Mod => DlDef(Def::Mod(did)), - ForeignMod => DlDef(Def::ForeignMod(did)), - Variant(..) 
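The NodeId decoder above shifts an id from the range it occupied in the foreign crate's metadata into the range reserved for it locally, preserving its offset within the range. A minimal sketch of that remapping follows; the simplified IdRange here is a stand-in for the real type, not the crate's API.

#[derive(Debug, Clone, Copy)]
struct IdRange {
    min: u32,
    max: u32,
}

impl IdRange {
    fn contains(&self, id: u32) -> bool {
        self.min <= id && id < self.max
    }
}

fn remap_node_id(id: u32, from: IdRange, to: IdRange) -> u32 {
    // The id must come from the range the foreign crate was encoded with.
    assert!(from.contains(id), "{} out of range {:?}", id, from);
    // Keep the offset within the range; wrapping ops sidestep intermediate
    // overflow checks, exactly as the comment in the decoder above notes.
    id.wrapping_sub(from.min).wrapping_add(to.min)
}

fn main() {
    let from = IdRange { min: 1_000, max: 2_000 };
    let to = IdRange { min: 50_000, max: 51_000 };
    assert_eq!(remap_node_id(1_234, from, to), 50_234);
}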
=> { - let enum_did = item_require_parent_item(cdata, item); - DlDef(Def::Variant(enum_did, did)) +impl<'a, 'tcx> SpecializedDecoder for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result { + let cnum = CrateNum::from_u32(u32::decode(self)?); + if cnum == LOCAL_CRATE { + Ok(self.cdata().cnum) + } else { + Ok(self.cdata().cnum_map.borrow()[cnum]) } - Trait => DlDef(Def::Trait(did)), - Enum => DlDef(Def::Enum(did)), - Impl | DefaultImpl => DlImpl(did), - PublicField | InheritedField => DlField, } } -fn parse_unsafety(item_doc: rbml::Doc) -> hir::Unsafety { - let unsafety_doc = reader::get_doc(item_doc, tag_unsafety); - if reader::doc_as_u8(unsafety_doc) != 0 { - hir::Unsafety::Unsafe - } else { - hir::Unsafety::Normal - } -} - -fn parse_paren_sugar(item_doc: rbml::Doc) -> bool { - let paren_sugar_doc = reader::get_doc(item_doc, tag_paren_sugar); - reader::doc_as_u8(paren_sugar_doc) != 0 -} +impl<'a, 'tcx> SpecializedDecoder for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result { + let lo = BytePos::decode(self)?; + let hi = BytePos::decode(self)?; -fn parse_polarity(item_doc: rbml::Doc) -> hir::ImplPolarity { - let polarity_doc = reader::get_doc(item_doc, tag_polarity); - if reader::doc_as_u8(polarity_doc) != 0 { - hir::ImplPolarity::Negative - } else { - hir::ImplPolarity::Positive - } -} + let tcx = if let Some(tcx) = self.tcx { + tcx + } else { + return Ok(syntax_pos::mk_sp(lo, hi)); + }; -fn parse_associated_type_names(item_doc: rbml::Doc) -> Vec { - let names_doc = reader::get_doc(item_doc, tag_associated_type_names); - reader::tagged_docs(names_doc, tag_associated_type_name) - .map(|name_doc| token::intern(name_doc.as_str_slice())) - .collect() -} + let (lo, hi) = if lo > hi { + // Currently macro expansion sometimes produces invalid Span values + // where lo > hi. In order not to crash the compiler when trying to + // translate these values, let's transform them into something we + // can handle (and which will produce useful debug locations at + // least some of the time). + // This workaround is only necessary as long as macro expansion is + // not fixed. FIXME(#23480) + (lo, lo) + } else { + (lo, hi) + }; -pub fn get_trait_def<'a, 'tcx>(cdata: Cmd, - item_id: DefIndex, - tcx: TyCtxt<'a, 'tcx, 'tcx>) -> ty::TraitDef<'tcx> -{ - let item_doc = cdata.lookup_item(item_id); - let generics = doc_generics(item_doc, tcx, cdata, tag_item_generics); - let unsafety = parse_unsafety(item_doc); - let associated_type_names = parse_associated_type_names(item_doc); - let paren_sugar = parse_paren_sugar(item_doc); - - ty::TraitDef::new(unsafety, - paren_sugar, - generics, - item_trait_ref(item_doc, tcx, cdata), - associated_type_names) -} + let imported_filemaps = self.cdata().imported_filemaps(&tcx.sess.codemap()); + let filemap = { + // Optimize for the case that most spans within a translated item + // originate from the same filemap. 
+ let last_filemap = &imported_filemaps[self.last_filemap_index]; + + if lo >= last_filemap.original_start_pos && + lo <= last_filemap.original_end_pos && + hi >= last_filemap.original_start_pos && + hi <= last_filemap.original_end_pos { + last_filemap + } else { + let mut a = 0; + let mut b = imported_filemaps.len(); -pub fn get_adt_def<'a, 'tcx>(cdata: Cmd, - item_id: DefIndex, - tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> ty::AdtDefMaster<'tcx> -{ - fn expect_variant_kind(family: Family) -> ty::VariantKind { - match family_to_variant_kind(family) { - Some(kind) => kind, - _ => bug!("unexpected family: {:?}", family), - } - } - fn get_enum_variants<'tcx>(cdata: Cmd, doc: rbml::Doc) -> Vec> { - let mut disr_val = 0; - reader::tagged_docs(doc, tag_items_data_item_variant).map(|p| { - let did = translated_def_id(cdata, p); - let item = cdata.lookup_item(did.index); + while b - a > 1 { + let m = (a + b) / 2; + if imported_filemaps[m].original_start_pos > lo { + b = m; + } else { + a = m; + } + } - if let Some(disr) = variant_disr_val(item) { - disr_val = disr; - } - let disr = disr_val; - disr_val = disr_val.wrapping_add(1); - - ty::VariantDefData { - did: did, - name: item_name(item), - fields: get_variant_fields(cdata, item), - disr_val: ConstInt::Infer(disr), - kind: expect_variant_kind(item_family(item)), + self.last_filemap_index = a; + &imported_filemaps[a] } - }).collect() - } - fn get_variant_fields<'tcx>(cdata: Cmd, doc: rbml::Doc) -> Vec> { - let mut index = 0; - reader::tagged_docs(doc, tag_item_field).map(|f| { - let ff = item_family(f); - match ff { - PublicField | InheritedField => {}, - _ => bug!("expected field, found {:?}", ff) - }; - ty::FieldDefData::new(item_def_id(f, cdata), - item_name(f), - struct_field_family_to_visibility(ff)) - }).chain(reader::tagged_docs(doc, tag_item_unnamed_field).map(|f| { - let ff = item_family(f); - let name = token::with_ident_interner(|interner| interner.intern(index.to_string())); - index += 1; - ty::FieldDefData::new(item_def_id(f, cdata), name, - struct_field_family_to_visibility(ff)) - })).collect() - } - fn get_struct_variant<'tcx>(cdata: Cmd, - doc: rbml::Doc, - did: DefId) -> ty::VariantDefData<'tcx, 'tcx> { - ty::VariantDefData { - did: did, - name: item_name(doc), - fields: get_variant_fields(cdata, doc), - disr_val: ConstInt::Infer(0), - kind: expect_variant_kind(item_family(doc)), - } + }; + + let lo = (lo - filemap.original_start_pos) + + filemap.translated_filemap.start_pos; + let hi = (hi - filemap.original_start_pos) + + filemap.translated_filemap.start_pos; + + Ok(syntax_pos::mk_sp(lo, hi)) } +} - let doc = cdata.lookup_item(item_id); - let did = DefId { krate: cdata.cnum, index: item_id }; - let mut ctor_did = None; - let (kind, variants) = match item_family(doc) { - Enum => { - (ty::AdtKind::Enum, - get_enum_variants(cdata, doc)) - } - Struct(..) => { - // Use separate constructor id for unit/tuple structs and reuse did for braced structs. - ctor_did = reader::maybe_get_doc(doc, tag_items_data_item_struct_ctor).map(|ctor_doc| { - translated_def_id(cdata, ctor_doc) - }); - (ty::AdtKind::Struct, - vec![get_struct_variant(cdata, doc, ctor_did.unwrap_or(did))]) - } - _ => bug!("get_adt_def called on a non-ADT {:?} - {:?}", - item_family(doc), did) - }; - - let adt = tcx.intern_adt_def(did, kind, variants); - if let Some(ctor_did) = ctor_did { - // Make adt definition available through constructor id as well. 
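The Span decoder above collapses malformed lo > hi spans to a point, finds the imported filemap containing the span (trying the filemap used last time before falling back to binary search), and rebases the positions into the local codemap. The following self-contained sketch shows the same lookup with plain integers standing in for BytePos and an invented ImportedFileMap record; it is an illustration, not the patch's actual types.

struct ImportedFileMap {
    original_start_pos: u32,
    original_end_pos: u32,
    translated_start_pos: u32,
}

fn translate_span(filemaps: &[ImportedFileMap], last_index: &mut usize, lo: u32, hi: u32) -> (u32, u32) {
    // Work around spans where lo > hi by collapsing them to a point.
    let (lo, hi) = if lo > hi { (lo, lo) } else { (lo, hi) };

    // Fast path: most spans of one item come from the filemap used last time.
    let idx = if filemaps[*last_index].original_start_pos <= lo &&
                 hi <= filemaps[*last_index].original_end_pos {
        *last_index
    } else {
        // Otherwise binary search; filemaps are sorted by original_start_pos.
        let mut a = 0;
        let mut b = filemaps.len();
        while b - a > 1 {
            let m = (a + b) / 2;
            if filemaps[m].original_start_pos > lo { b = m; } else { a = m; }
        }
        *last_index = a;
        a
    };

    let fm = &filemaps[idx];
    (lo - fm.original_start_pos + fm.translated_start_pos,
     hi - fm.original_start_pos + fm.translated_start_pos)
}

fn main() {
    let maps = [
        ImportedFileMap { original_start_pos: 0, original_end_pos: 99, translated_start_pos: 500 },
        ImportedFileMap { original_start_pos: 100, original_end_pos: 199, translated_start_pos: 700 },
    ];
    let mut last = 0;
    assert_eq!(translate_span(&maps, &mut last, 120, 130), (720, 730));
    assert_eq!(last, 1);
}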
- tcx.insert_adt_def(ctor_did, adt); - } - - // this needs to be done *after* the variant is interned, - // to support recursive structures - for variant in &adt.variants { - if variant.kind == ty::VariantKind::Tuple && - adt.adt_kind() == ty::AdtKind::Enum { - // tuple-like enum variant fields aren't real items - get the types - // from the ctor. - debug!("evaluating the ctor-type of {:?}", - variant.name); - let ctor_ty = get_type(cdata, variant.did.index, tcx).ty; - debug!("evaluating the ctor-type of {:?}.. {:?}", - variant.name, - ctor_ty); - let field_tys = match ctor_ty.sty { - ty::TyFnDef(_, _, &ty::BareFnTy { sig: ty::Binder(ty::FnSig { - ref inputs, .. - }), ..}) => { - // tuple-struct constructors don't have escaping regions - assert!(!inputs.has_escaping_regions()); - inputs - }, - _ => bug!("tuple-variant ctor is not an ADT") +// FIXME(#36588) These impls are horribly unsound as they allow +// the caller to pick any lifetime for 'tcx, including 'static, +// by using the unspecialized proxies to them. + +impl<'a, 'tcx> SpecializedDecoder> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result, Self::Error> { + let tcx = self.tcx(); + + // Handle shorthands first, if we have an usize > 0x80. + if self.opaque.data[self.opaque.position()] & 0x80 != 0 { + let pos = self.read_usize()?; + assert!(pos >= SHORTHAND_OFFSET); + let key = ty::CReaderCacheKey { + cnum: self.cdata().cnum, + pos: pos - SHORTHAND_OFFSET }; - for (field, &ty) in variant.fields.iter().zip(field_tys.iter()) { - field.fulfill_ty(ty); + if let Some(ty) = tcx.rcache.borrow().get(&key).cloned() { + return Ok(ty); } + + let ty = self.with_position(key.pos, Ty::decode)?; + tcx.rcache.borrow_mut().insert(key, ty); + Ok(ty) } else { - for field in &variant.fields { - debug!("evaluating the type of {:?}::{:?}", variant.name, field.name); - let ty = get_type(cdata, field.did.index, tcx).ty; - field.fulfill_ty(ty); - debug!("evaluating the type of {:?}::{:?}: {:?}", - variant.name, field.name, ty); - } + Ok(tcx.mk_ty(ty::TypeVariants::decode(self)?)) } } - - adt } -pub fn get_predicates<'a, 'tcx>(cdata: Cmd, - item_id: DefIndex, - tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> ty::GenericPredicates<'tcx> -{ - let item_doc = cdata.lookup_item(item_id); - doc_predicates(item_doc, tcx, cdata, tag_item_generics) -} -pub fn get_super_predicates<'a, 'tcx>(cdata: Cmd, - item_id: DefIndex, - tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> ty::GenericPredicates<'tcx> -{ - let item_doc = cdata.lookup_item(item_id); - doc_predicates(item_doc, tcx, cdata, tag_item_super_predicates) -} +impl<'a, 'tcx> SpecializedDecoder> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result, Self::Error> { + Ok(ty::GenericPredicates { + parent: Decodable::decode(self)?, + predicates: (0..self.read_usize()?).map(|_| { + // Handle shorthands first, if we have an usize > 0x80. + if self.opaque.data[self.opaque.position()] & 0x80 != 0 { + let pos = self.read_usize()?; + assert!(pos >= SHORTHAND_OFFSET); + let pos = pos - SHORTHAND_OFFSET; -pub fn get_type<'a, 'tcx>(cdata: Cmd, id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> ty::TypeScheme<'tcx> -{ - let item_doc = cdata.lookup_item(id); - let t = item_type(DefId { krate: cdata.cnum, index: id }, item_doc, tcx, - cdata); - let generics = doc_generics(item_doc, tcx, cdata, tag_item_generics); - ty::TypeScheme { - generics: generics, - ty: t + self.with_position(pos, ty::Predicate::decode) + } else { + ty::Predicate::decode(self) + } + }).collect::, _>>()? 
+ }) } } -pub fn get_stability(cdata: Cmd, id: DefIndex) -> Option { - let item = cdata.lookup_item(id); - reader::maybe_get_doc(item, tag_items_data_item_stability).map(|doc| { - let mut decoder = reader::Decoder::new(doc); - Decodable::decode(&mut decoder).unwrap() - }) +impl<'a, 'tcx> SpecializedDecoder<&'tcx Substs<'tcx>> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result<&'tcx Substs<'tcx>, Self::Error> { + Ok(self.tcx().mk_substs(Decodable::decode(self)?)) + } } -pub fn get_deprecation(cdata: Cmd, id: DefIndex) -> Option { - let item = cdata.lookup_item(id); - reader::maybe_get_doc(item, tag_items_data_item_deprecation).map(|doc| { - let mut decoder = reader::Decoder::new(doc); - Decodable::decode(&mut decoder).unwrap() - }) +impl<'a, 'tcx> SpecializedDecoder<&'tcx ty::Region> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result<&'tcx ty::Region, Self::Error> { + Ok(self.tcx().mk_region(Decodable::decode(self)?)) + } } -pub fn get_visibility(cdata: Cmd, id: DefIndex) -> ty::Visibility { - item_visibility(cdata.lookup_item(id)) +impl<'a, 'tcx> SpecializedDecoder<&'tcx ty::Slice>> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result<&'tcx ty::Slice>, Self::Error> { + Ok(self.tcx().mk_type_list(Decodable::decode(self)?)) + } } -pub fn get_parent_impl(cdata: Cmd, id: DefIndex) -> Option { - let item = cdata.lookup_item(id); - reader::maybe_get_doc(item, tag_items_data_parent_impl).map(|doc| { - translated_def_id(cdata, doc) - }) +impl<'a, 'tcx> SpecializedDecoder<&'tcx ty::BareFnTy<'tcx>> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result<&'tcx ty::BareFnTy<'tcx>, Self::Error> { + Ok(self.tcx().mk_bare_fn(Decodable::decode(self)?)) + } } -pub fn get_repr_attrs(cdata: Cmd, id: DefIndex) -> Vec { - let item = cdata.lookup_item(id); - match reader::maybe_get_doc(item, tag_items_data_item_repr).map(|doc| { - let mut decoder = reader::Decoder::new(doc); - Decodable::decode(&mut decoder).unwrap() - }) { - Some(attrs) => attrs, - None => Vec::new(), +impl<'a, 'tcx> SpecializedDecoder> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result, Self::Error> { + let def_id = DefId::decode(self)?; + Ok(self.tcx().lookup_adt_def(def_id)) } } -pub fn get_impl_polarity<'tcx>(cdata: Cmd, - id: DefIndex) - -> Option -{ - let item_doc = cdata.lookup_item(id); - let fam = item_family(item_doc); - match fam { - Family::Impl => { - Some(parse_polarity(item_doc)) - } - _ => None +impl<'a, 'tcx> MetadataBlob { + pub fn is_compatible(&self) -> bool { + self.raw_bytes().starts_with(METADATA_HEADER) } -} -pub fn get_custom_coerce_unsized_kind<'tcx>( - cdata: Cmd, - id: DefIndex) - -> Option -{ - let item_doc = cdata.lookup_item(id); - reader::maybe_get_doc(item_doc, tag_impl_coerce_unsized_kind).map(|kind_doc| { - let mut decoder = reader::Decoder::new(kind_doc); - Decodable::decode(&mut decoder).unwrap() - }) -} + pub fn get_root(&self) -> CrateRoot { + let slice = self.raw_bytes(); + let offset = METADATA_HEADER.len(); + let pos = (((slice[offset + 0] as u32) << 24) | + ((slice[offset + 1] as u32) << 16) | + ((slice[offset + 2] as u32) << 8) | + ((slice[offset + 3] as u32) << 0)) as usize; + Lazy::with_position(pos).decode(self) + } + + /// Go through each item in the metadata and create a map from that + /// item's def-key to the item's DefIndex. 
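Both the Ty and GenericPredicates decoders above peek at the next byte: when its high bit is set, what follows is not an inline value but the position (offset by SHORTHAND_OFFSET) of an identical value written earlier, and the Ty decoder additionally caches results by that position. The mini-format below is invented purely to illustrate the tag-bit-plus-back-reference idea; it is not the metadata's actual byte layout.

use std::collections::HashMap;

const SHORTHAND_BIT: u8 = 0x80;

fn decode_at(data: &[u8], pos: usize, cache: &mut HashMap<usize, u8>) -> u8 {
    if data[pos] & SHORTHAND_BIT != 0 {
        // Back-reference: the low bits give the position of the inline copy.
        let target = (data[pos] & !SHORTHAND_BIT) as usize;
        if let Some(&v) = cache.get(&target) {
            return v;
        }
        let v = decode_at(data, target, cache);
        cache.insert(target, v);
        v
    } else {
        // Inline value.
        data[pos]
    }
}

fn main() {
    // One inline value at position 0, followed by two back-references to it.
    let data = [0x2a, 0x80, 0x80];
    let mut cache = HashMap::new();
    assert_eq!(decode_at(&data, 0, &mut cache), 0x2a);
    assert_eq!(decode_at(&data, 1, &mut cache), 0x2a);
    assert_eq!(decode_at(&data, 2, &mut cache), 0x2a);
}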
+ pub fn load_key_map(&self, index: LazySeq) -> FnvHashMap { + index.iter_enumerated(self.raw_bytes()).map(|(index, item)| { + (item.decode(self).def_key.decode(self), index) + }).collect() + } -pub fn get_impl_trait<'a, 'tcx>(cdata: Cmd, - id: DefIndex, - tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> Option> -{ - let item_doc = cdata.lookup_item(id); - let fam = item_family(item_doc); - match fam { - Family::Impl | Family::DefaultImpl => { - reader::maybe_get_doc(item_doc, tag_item_trait_ref).map(|tp| { - doc_trait_ref(tp, tcx, cdata) - }) + pub fn list_crate_metadata(&self, out: &mut io::Write) -> io::Result<()> { + write!(out, "=External Dependencies=\n")?; + let root = self.get_root(); + for (i, dep) in root.crate_deps.decode(self).enumerate() { + write!(out, "{} {}-{}\n", i + 1, dep.name, dep.hash)?; } - _ => None + write!(out, "\n")?; + Ok(()) } } -/// Iterates over the language items in the given crate. -pub fn each_lang_item(cdata: Cmd, mut f: F) -> bool where - F: FnMut(DefIndex, usize) -> bool, -{ - let root = rbml::Doc::new(cdata.data()); - let lang_items = reader::get_doc(root, tag_lang_items); - reader::tagged_docs(lang_items, tag_lang_items_item).all(|item_doc| { - let id_doc = reader::get_doc(item_doc, tag_lang_items_item_id); - let id = reader::doc_as_u32(id_doc) as usize; - let index_doc = reader::get_doc(item_doc, tag_lang_items_item_index); - let index = DefIndex::from_u32(reader::doc_as_u32(index_doc)); - - f(index, id) - }) +impl<'tcx> EntryKind<'tcx> { + fn to_def(&self, did: DefId) -> Option { + Some(match *self { + EntryKind::Const => Def::Const(did), + EntryKind::AssociatedConst(_) => Def::AssociatedConst(did), + EntryKind::ImmStatic | + EntryKind::ForeignImmStatic => Def::Static(did, false), + EntryKind::MutStatic | + EntryKind::ForeignMutStatic => Def::Static(did, true), + EntryKind::Struct(_) => Def::Struct(did), + EntryKind::Union(_) => Def::Union(did), + EntryKind::Fn(_) | + EntryKind::ForeignFn(_) => Def::Fn(did), + EntryKind::Method(_) => Def::Method(did), + EntryKind::Type => Def::TyAlias(did), + EntryKind::AssociatedType(_) => Def::AssociatedTy(did), + EntryKind::Mod(_) => Def::Mod(did), + EntryKind::Variant(_) => Def::Variant(did), + EntryKind::Trait(_) => Def::Trait(did), + EntryKind::Enum => Def::Enum(did), + + EntryKind::ForeignMod | + EntryKind::Impl(_) | + EntryKind::DefaultImpl(_) | + EntryKind::Field | + EntryKind::Closure (_) => { + return None + } + }) + } } -fn each_child_of_item_or_crate(cdata: Cmd, - item_doc: rbml::Doc, - mut get_crate_data: G, - mut callback: F) where - F: FnMut(DefLike, ast::Name, ty::Visibility), - G: FnMut(ast::CrateNum) -> Rc, -{ - // Iterate over all children. - for child_info_doc in reader::tagged_docs(item_doc, tag_mod_child) { - let child_def_id = translated_def_id(cdata, child_info_doc); - - // This item may be in yet another crate if it was the child of a - // reexport. - let crate_data = if child_def_id.krate == cdata.cnum { - None - } else { - Some(get_crate_data(child_def_id.krate)) - }; - let crate_data = match crate_data { - Some(ref cdata) => &**cdata, - None => cdata - }; +impl<'a, 'tcx> CrateMetadata { + fn maybe_entry(&self, item_id: DefIndex) -> Option>> { + self.root.index.lookup(self.blob.raw_bytes(), item_id) + } - // Get the item. - if let Some(child_item_doc) = crate_data.get_item(child_def_id.index) { - // Hand off the item to the callback. 
- let child_name = item_name(child_item_doc); - let def_like = item_to_def_like(crate_data, child_item_doc, child_def_id); - let visibility = item_visibility(child_item_doc); - callback(def_like, child_name, visibility); + fn entry(&self, item_id: DefIndex) -> Entry<'tcx> { + match self.maybe_entry(item_id) { + None => bug!("entry: id not found: {:?} in crate {:?} with number {}", + item_id, + self.name, + self.cnum), + Some(d) => d.decode(self) } } - for reexport_doc in reexports(item_doc) { - let def_id_doc = reader::get_doc(reexport_doc, - tag_items_data_item_reexport_def_id); - let child_def_id = translated_def_id(cdata, def_id_doc); - - let name_doc = reader::get_doc(reexport_doc, - tag_items_data_item_reexport_name); - let name = name_doc.as_str_slice(); - - // This reexport may be in yet another crate. - let crate_data = if child_def_id.krate == cdata.cnum { - None - } else { - Some(get_crate_data(child_def_id.krate)) - }; - let crate_data = match crate_data { - Some(ref cdata) => &**cdata, - None => cdata - }; - - // Get the item. - if let Some(child_item_doc) = crate_data.get_item(child_def_id.index) { - // Hand off the item to the callback. - let def_like = item_to_def_like(crate_data, child_item_doc, child_def_id); - // These items have a public visibility because they're part of - // a public re-export. - callback(def_like, token::intern(name), ty::Visibility::Public); + fn local_def_id(&self, index: DefIndex) -> DefId { + DefId { + krate: self.cnum, + index: index } } -} - -/// Iterates over each child of the given item. -pub fn each_child_of_item(cdata: Cmd, id: DefIndex, get_crate_data: G, callback: F) - where F: FnMut(DefLike, ast::Name, ty::Visibility), - G: FnMut(ast::CrateNum) -> Rc, -{ - // Find the item. - let item_doc = match cdata.get_item(id) { - None => return, - Some(item_doc) => item_doc, - }; - - each_child_of_item_or_crate(cdata, item_doc, get_crate_data, callback) -} - -/// Iterates over all the top-level crate items. 
-pub fn each_top_level_item_of_crate(cdata: Cmd, get_crate_data: G, callback: F) - where F: FnMut(DefLike, ast::Name, ty::Visibility), - G: FnMut(ast::CrateNum) -> Rc, -{ - let root_doc = rbml::Doc::new(cdata.data()); - let misc_info_doc = reader::get_doc(root_doc, tag_misc_info); - let crate_items_doc = reader::get_doc(misc_info_doc, - tag_misc_info_crate_items); - - each_child_of_item_or_crate(cdata, - crate_items_doc, - get_crate_data, - callback) -} - -pub fn get_item_name(cdata: Cmd, id: DefIndex) -> ast::Name { - item_name(cdata.lookup_item(id)) -} - -pub fn maybe_get_item_name(cdata: Cmd, id: DefIndex) -> Option { - maybe_item_name(cdata.lookup_item(id)) -} -pub enum FoundAst<'ast> { - Found(&'ast InlinedItem), - FoundParent(DefId, &'ast hir::Item), - NotFound, -} - -pub fn maybe_get_item_ast<'a, 'tcx>(cdata: Cmd, tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefIndex) - -> FoundAst<'tcx> { - debug!("Looking up item: {:?}", id); - let item_doc = cdata.lookup_item(id); - let item_did = item_def_id(item_doc, cdata); - let parent_def_id = DefId { - krate: cdata.cnum, - index: def_key(cdata, id).parent.unwrap() - }; - let mut parent_def_path = def_path(cdata, id); - parent_def_path.data.pop(); - if let Some(ast_doc) = reader::maybe_get_doc(item_doc, tag_ast as usize) { - let ii = decode_inlined_item(cdata, - tcx, - parent_def_path, - parent_def_id, - ast_doc, - item_did); - return FoundAst::Found(ii); - } else if let Some(parent_did) = item_parent_item(cdata, item_doc) { - // Remove the last element from the paths, since we are now - // trying to inline the parent. - let grandparent_def_id = DefId { - krate: cdata.cnum, - index: def_key(cdata, parent_def_id.index).parent.unwrap() - }; - let mut grandparent_def_path = parent_def_path; - grandparent_def_path.data.pop(); - let parent_doc = cdata.lookup_item(parent_did.index); - if let Some(ast_doc) = reader::maybe_get_doc(parent_doc, tag_ast as usize) { - let ii = decode_inlined_item(cdata, - tcx, - grandparent_def_path, - grandparent_def_id, - ast_doc, - parent_did); - if let &InlinedItem::Item(_, ref i) = ii { - return FoundAst::FoundParent(parent_did, i); - } - } + fn item_name(&self, item: &Entry<'tcx>) -> ast::Name { + item.def_key.decode(self).disambiguated_data.data.get_opt_name() + .expect("no name in item_name") } - FoundAst::NotFound -} -pub fn is_item_mir_available<'tcx>(cdata: Cmd, id: DefIndex) -> bool { - if let Some(item_doc) = cdata.get_item(id) { - return reader::maybe_get_doc(item_doc, tag_mir as usize).is_some(); + pub fn get_def(&self, index: DefIndex) -> Option { + self.entry(index).kind.to_def(self.local_def_id(index)) } - false -} + pub fn get_trait_def(&self, + item_id: DefIndex, + tcx: TyCtxt<'a, 'tcx, 'tcx>) -> ty::TraitDef<'tcx> { + let data = match self.entry(item_id).kind { + EntryKind::Trait(data) => data.decode(self), + _ => bug!() + }; -pub fn maybe_get_item_mir<'a, 'tcx>(cdata: Cmd, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: DefIndex) - -> Option> { - let item_doc = cdata.lookup_item(id); + ty::TraitDef::new(data.unsafety, data.paren_sugar, + tcx.lookup_generics(self.local_def_id(item_id)), + data.trait_ref.decode((self, tcx)), + self.def_path(item_id).unwrap().deterministic_hash(tcx)) + } - return reader::maybe_get_doc(item_doc, tag_mir as usize).map(|mir_doc| { - let dcx = tls_context::DecodingContext { - crate_metadata: cdata, - tcx: tcx, + fn get_variant(&self, item: &Entry<'tcx>, index: DefIndex) + -> (ty::VariantDefData<'tcx, 'tcx>, Option) { + let data = match item.kind { + EntryKind::Variant(data) | + 
EntryKind::Struct(data) | + EntryKind::Union(data) => data.decode(self), + _ => bug!() }; - let mut decoder = reader::Decoder::new(mir_doc); - let mut mir = decoder.read_opaque(|opaque_decoder, _| { - tls::enter_decoding_context(&dcx, opaque_decoder, |_, opaque_decoder| { - Decodable::decode(opaque_decoder) - }) - }).unwrap(); - - assert!(decoder.position() == mir_doc.end); + let fields = item.children.decode(self).map(|index| { + let f = self.entry(index); + ty::FieldDefData::new(self.local_def_id(index), + self.item_name(&f), + f.visibility) + }).collect(); + + (ty::VariantDefData { + did: self.local_def_id(data.struct_ctor.unwrap_or(index)), + name: self.item_name(item), + fields: fields, + disr_val: ConstInt::Infer(data.disr), + kind: data.kind, + }, data.struct_ctor) + } - let mut def_id_and_span_translator = MirDefIdAndSpanTranslator { - crate_metadata: cdata, - codemap: tcx.sess.codemap(), - last_filemap_index_hint: Cell::new(0), + pub fn get_adt_def(&self, item_id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> ty::AdtDefMaster<'tcx> { + let item = self.entry(item_id); + let did = self.local_def_id(item_id); + let mut ctor_index = None; + let variants = if let EntryKind::Enum = item.kind { + item.children.decode(self).map(|index| { + let (variant, struct_ctor) = self.get_variant(&self.entry(index), index); + assert_eq!(struct_ctor, None); + variant + }).collect() + } else{ + let (variant, struct_ctor) = self.get_variant(&item, item_id); + ctor_index = struct_ctor; + vec![variant] + }; + let kind = match item.kind { + EntryKind::Enum => ty::AdtKind::Enum, + EntryKind::Struct(_) => ty::AdtKind::Struct, + EntryKind::Union(_) => ty::AdtKind::Union, + _ => bug!("get_adt_def called on a non-ADT {:?}", did) }; - def_id_and_span_translator.visit_mir(&mut mir); - for promoted in &mut mir.promoted { - def_id_and_span_translator.visit_mir(promoted); + let adt = tcx.intern_adt_def(did, kind, variants); + if let Some(ctor_index) = ctor_index { + // Make adt definition available through constructor id as well. 
+ tcx.insert_adt_def(self.local_def_id(ctor_index), adt); } - mir - }); + // this needs to be done *after* the variant is interned, + // to support recursive structures + for variant in &adt.variants { + for field in &variant.fields { + debug!("evaluating the type of {:?}::{:?}", variant.name, field.name); + let ty = self.get_type(field.did.index, tcx); + field.fulfill_ty(ty); + debug!("evaluating the type of {:?}::{:?}: {:?}", + variant.name, field.name, ty); + } + } - struct MirDefIdAndSpanTranslator<'cdata, 'codemap> { - crate_metadata: Cmd<'cdata>, - codemap: &'codemap codemap::CodeMap, - last_filemap_index_hint: Cell + adt } - impl<'v, 'cdata, 'codemap> mir::visit::MutVisitor<'v> - for MirDefIdAndSpanTranslator<'cdata, 'codemap> - { - fn visit_def_id(&mut self, def_id: &mut DefId) { - *def_id = translate_def_id(self.crate_metadata, *def_id); - } + pub fn get_predicates(&self, item_id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> ty::GenericPredicates<'tcx> { + self.entry(item_id).predicates.unwrap().decode((self, tcx)) + } - fn visit_span(&mut self, span: &mut Span) { - *span = translate_span(self.crate_metadata, - self.codemap, - &self.last_filemap_index_hint, - *span); + pub fn get_super_predicates(&self, item_id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> ty::GenericPredicates<'tcx> { + match self.entry(item_id).kind { + EntryKind::Trait(data) => { + data.decode(self).super_predicates.decode((self, tcx)) + } + _ => bug!() } } -} -fn get_explicit_self(item: rbml::Doc) -> ty::ExplicitSelfCategory { - fn get_mutability(ch: u8) -> hir::Mutability { - match ch as char { - 'i' => hir::MutImmutable, - 'm' => hir::MutMutable, - _ => bug!("unknown mutability character: `{}`", ch as char), - } + pub fn get_generics(&self, item_id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> ty::Generics<'tcx> { + self.entry(item_id).generics.unwrap().decode((self, tcx)) } - let explicit_self_doc = reader::get_doc(item, tag_item_trait_method_explicit_self); - let string = explicit_self_doc.as_str_slice(); + pub fn get_type(&self, id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> { + self.entry(id).ty.unwrap().decode((self, tcx)) + } - let explicit_self_kind = string.as_bytes()[0]; - match explicit_self_kind as char { - 's' => ty::ExplicitSelfCategory::Static, - 'v' => ty::ExplicitSelfCategory::ByValue, - '~' => ty::ExplicitSelfCategory::ByBox, - // FIXME(#4846) expl. region - '&' => { - ty::ExplicitSelfCategory::ByReference( - ty::ReEmpty, - get_mutability(string.as_bytes()[1])) - } - _ => bug!("unknown self type code: `{}`", explicit_self_kind as char) + pub fn get_stability(&self, id: DefIndex) -> Option { + self.entry(id).stability.map(|stab| stab.decode(self)) } -} -/// Returns the def IDs of all the items in the given implementation. 
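As the comment in get_adt_def above notes, the AdtDef is interned while its field types are still unfilled, and the types are decoded and fulfilled afterwards; interning first is what lets recursive types refer to the ADT being defined. A toy illustration of that intern-then-backfill pattern, with strings standing in for types and names that are purely illustrative:

use std::cell::RefCell;

struct FieldDef {
    name: String,
    // Stands in for the fulfill_ty slot that is filled after interning.
    ty: RefCell<Option<String>>,
}

struct AdtDef {
    name: String,
    fields: Vec<FieldDef>,
}

fn main() {
    // Step 1: "intern" the definition while its field types are still unknown.
    let adt = AdtDef {
        name: "List".to_string(),
        fields: vec![FieldDef { name: "next".to_string(), ty: RefCell::new(None) }],
    };
    // Step 2: only now compute the field types; they may mention `List` itself.
    for field in &adt.fields {
        *field.ty.borrow_mut() = Some(format!("Option<Box<{}>>", adt.name));
    }
    assert_eq!(*adt.fields[0].ty.borrow(), Some("Option<Box<List>>".to_string()));
    assert_eq!(adt.fields[0].name, "next");
}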
-pub fn get_impl_items(cdata: Cmd, impl_id: DefIndex) - -> Vec { - reader::tagged_docs(cdata.lookup_item(impl_id), tag_item_impl_item).map(|doc| { - let def_id = item_def_id(doc, cdata); - match item_sort(doc) { - Some('C') | Some('c') => ty::ConstTraitItemId(def_id), - Some('r') | Some('p') => ty::MethodTraitItemId(def_id), - Some('t') => ty::TypeTraitItemId(def_id), - _ => bug!("unknown impl item sort"), - } - }).collect() -} + pub fn get_deprecation(&self, id: DefIndex) -> Option { + self.entry(id).deprecation.map(|depr| depr.decode(self)) + } -pub fn get_trait_name(cdata: Cmd, id: DefIndex) -> ast::Name { - let doc = cdata.lookup_item(id); - item_name(doc) -} + pub fn get_visibility(&self, id: DefIndex) -> ty::Visibility { + self.entry(id).visibility + } -pub fn is_static_method(cdata: Cmd, id: DefIndex) -> bool { - let doc = cdata.lookup_item(id); - match item_sort(doc) { - Some('r') | Some('p') => { - get_explicit_self(doc) == ty::ExplicitSelfCategory::Static + fn get_impl_data(&self, id: DefIndex) -> ImplData<'tcx> { + match self.entry(id).kind { + EntryKind::Impl(data) => data.decode(self), + _ => bug!() } - _ => false } -} -pub fn get_impl_or_trait_item<'a, 'tcx>(cdata: Cmd, id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> Option> { - let item_doc = cdata.lookup_item(id); - - let def_id = item_def_id(item_doc, cdata); - - let container_id = if let Some(id) = item_parent_item(cdata, item_doc) { - id - } else { - return None; - }; - let container_doc = cdata.lookup_item(container_id.index); - let container = match item_family(container_doc) { - Trait => TraitContainer(container_id), - _ => ImplContainer(container_id), - }; - - let name = item_name(item_doc); - let vis = item_visibility(item_doc); - let defaultness = item_defaultness(item_doc); - - Some(match item_sort(item_doc) { - sort @ Some('C') | sort @ Some('c') => { - let ty = doc_type(item_doc, tcx, cdata); - ty::ConstTraitItem(Rc::new(ty::AssociatedConst { - name: name, - ty: ty, - vis: vis, - defaultness: defaultness, - def_id: def_id, - container: container, - has_value: sort == Some('C') - })) - } - Some('r') | Some('p') => { - let generics = doc_generics(item_doc, tcx, cdata, tag_method_ty_generics); - let predicates = doc_predicates(item_doc, tcx, cdata, tag_method_ty_generics); - let ity = tcx.lookup_item_type(def_id).ty; - let fty = match ity.sty { - ty::TyFnDef(_, _, fty) => fty, - _ => bug!( - "the type {:?} of the method {:?} is not a function?", - ity, name) - }; - let explicit_self = get_explicit_self(item_doc); - - ty::MethodTraitItem(Rc::new(ty::Method::new(name, - generics, - predicates, - fty, - explicit_self, - vis, - defaultness, - def_id, - container))) - } - Some('t') => { - let ty = maybe_doc_type(item_doc, tcx, cdata); - ty::TypeTraitItem(Rc::new(ty::AssociatedType { - name: name, - ty: ty, - vis: vis, - defaultness: defaultness, - def_id: def_id, - container: container, - })) - } - _ => return None - }) -} + pub fn get_parent_impl(&self, id: DefIndex) -> Option { + self.get_impl_data(id).parent_impl + } -pub fn get_trait_item_def_ids(cdata: Cmd, id: DefIndex) - -> Vec { - let item = cdata.lookup_item(id); - reader::tagged_docs(item, tag_item_trait_item).map(|mth| { - let def_id = item_def_id(mth, cdata); - match item_sort(mth) { - Some('C') | Some('c') => ty::ConstTraitItemId(def_id), - Some('r') | Some('p') => ty::MethodTraitItemId(def_id), - Some('t') => ty::TypeTraitItemId(def_id), - _ => bug!("unknown trait item sort"), - } - }).collect() -} + pub fn get_impl_polarity(&self, id: DefIndex) -> 
hir::ImplPolarity { + self.get_impl_data(id).polarity + } -pub fn get_item_variances(cdata: Cmd, id: DefIndex) -> ty::ItemVariances { - let item_doc = cdata.lookup_item(id); - let variance_doc = reader::get_doc(item_doc, tag_item_variances); - let mut decoder = reader::Decoder::new(variance_doc); - Decodable::decode(&mut decoder).unwrap() -} + pub fn get_custom_coerce_unsized_kind(&self, id: DefIndex) + -> Option { + self.get_impl_data(id).coerce_unsized_kind + } -pub fn get_provided_trait_methods<'a, 'tcx>(cdata: Cmd, - id: DefIndex, - tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> Vec>> { - let item = cdata.lookup_item(id); + pub fn get_impl_trait(&self, + id: DefIndex, + tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> Option> { + self.get_impl_data(id).trait_ref.map(|tr| tr.decode((self, tcx))) + } - reader::tagged_docs(item, tag_item_trait_item).filter_map(|mth_id| { - let did = item_def_id(mth_id, cdata); - let mth = cdata.lookup_item(did.index); + /// Iterates over the language items in the given crate. + pub fn get_lang_items(&self) -> Vec<(DefIndex, usize)> { + self.root.lang_items.decode(self).collect() + } - if item_sort(mth) == Some('p') { - let trait_item = get_impl_or_trait_item(cdata, did.index, tcx); - if let Some(ty::MethodTraitItem(ref method)) = trait_item { - Some((*method).clone()) - } else { - None - } - } else { - None - } - }).collect() -} + /// Iterates over each child of the given item. + pub fn each_child_of_item(&self, id: DefIndex, mut callback: F) + where F: FnMut(def::Export) + { + // Find the item. + let item = match self.maybe_entry(id) { + None => return, + Some(item) => item.decode(self), + }; -pub fn get_associated_consts<'a, 'tcx>(cdata: Cmd, id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> Vec>> { - let item = cdata.lookup_item(id); + // Iterate over all children. + for child_index in item.children.decode(self) { + // Get the item. + if let Some(child) = self.maybe_entry(child_index) { + let child = child.decode(self); + // Hand off the item to the callback. + match child.kind { + // FIXME(eddyb) Don't encode these in children. + EntryKind::ForeignMod => { + for child_index in child.children.decode(self) { + callback(def::Export { + def_id: self.local_def_id(child_index), + name: self.item_name(&self.entry(child_index)) + }); + } + continue; + } + EntryKind::Impl(_) | EntryKind::DefaultImpl(_) => continue, - [tag_item_trait_item, tag_item_impl_item].iter().flat_map(|&tag| { - reader::tagged_docs(item, tag).filter_map(|ac_id| { - let did = item_def_id(ac_id, cdata); - let ac_doc = cdata.lookup_item(did.index); + _ => {} + } - match item_sort(ac_doc) { - Some('C') | Some('c') => { - let trait_item = get_impl_or_trait_item(cdata, did.index, tcx); - if let Some(ty::ConstTraitItem(ref ac)) = trait_item { - Some((*ac).clone()) - } else { - None - } + let def_key = child.def_key.decode(self); + if let Some(name) = def_key.disambiguated_data.data.get_opt_name() { + callback(def::Export { + def_id: self.local_def_id(child_index), + name: name + }); } - _ => None } - }) - }).collect() -} - -pub fn get_variant_kind(cdata: Cmd, node_id: DefIndex) -> Option -{ - let item = cdata.lookup_item(node_id); - family_to_variant_kind(item_family(item)) -} - -pub fn get_struct_ctor_def_id(cdata: Cmd, node_id: DefIndex) -> Option -{ - let item = cdata.lookup_item(node_id); - reader::maybe_get_doc(item, tag_items_data_item_struct_ctor). 
- map(|ctor_doc| translated_def_id(cdata, ctor_doc)) -} + } -/// If node_id is the constructor of a tuple struct, retrieve the NodeId of -/// the actual type definition, otherwise, return None -pub fn get_tuple_struct_definition_if_ctor(cdata: Cmd, - node_id: DefIndex) - -> Option -{ - let item = cdata.lookup_item(node_id); - reader::tagged_docs(item, tag_items_data_item_is_tuple_struct_ctor).next().map(|_| { - item_require_parent_item(cdata, item) - }) -} + if let EntryKind::Mod(data) = item.kind { + for exp in data.decode(self).reexports.decode(self) { + callback(exp); + } + } + } -pub fn get_item_attrs(cdata: Cmd, - orig_node_id: DefIndex) - -> Vec { - // The attributes for a tuple struct are attached to the definition, not the ctor; - // we assume that someone passing in a tuple struct ctor is actually wanting to - // look at the definition - let node_id = get_tuple_struct_definition_if_ctor(cdata, orig_node_id); - let node_id = node_id.map(|x| x.index).unwrap_or(orig_node_id); - let item = cdata.lookup_item(node_id); - get_attributes(item) -} + pub fn maybe_get_item_ast(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefIndex) + -> Option<&'tcx InlinedItem> { + debug!("Looking up item: {:?}", id); + let item_doc = self.entry(id); + let item_did = self.local_def_id(id); + let parent_def_id = self.local_def_id(self.def_key(id).parent.unwrap()); + let mut parent_def_path = self.def_path(id).unwrap(); + parent_def_path.data.pop(); + item_doc.ast.map(|ast| { + let ast = ast.decode(self); + decode_inlined_item(self, tcx, parent_def_path, parent_def_id, ast, item_did) + }) + } -pub fn get_struct_field_attrs(cdata: Cmd) -> FnvHashMap> { - let data = rbml::Doc::new(cdata.data()); - let fields = reader::get_doc(data, tag_struct_fields); - reader::tagged_docs(fields, tag_struct_field).map(|field| { - let def_id = translated_def_id(cdata, reader::get_doc(field, tag_def_id)); - let attrs = get_attributes(field); - (def_id, attrs) - }).collect() -} + pub fn is_item_mir_available(&self, id: DefIndex) -> bool { + self.maybe_entry(id).and_then(|item| item.decode(self).mir).is_some() + } -fn struct_field_family_to_visibility(family: Family) -> ty::Visibility { - match family { - PublicField => ty::Visibility::Public, - InheritedField => ty::Visibility::PrivateExternal, - _ => bug!() + pub fn maybe_get_item_mir(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefIndex) + -> Option> { + self.entry(id).mir.map(|mir| mir.decode((self, tcx))) } -} -pub fn get_struct_field_names(cdata: Cmd, id: DefIndex) -> Vec { - let item = cdata.lookup_item(id); - let mut index = 0; - reader::tagged_docs(item, tag_item_field).map(|an_item| { - item_name(an_item) - }).chain(reader::tagged_docs(item, tag_item_unnamed_field).map(|_| { - let name = token::with_ident_interner(|interner| interner.intern(index.to_string())); - index += 1; - name - })).collect() -} + pub fn get_impl_or_trait_item(&self, id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> Option> { + let item = self.entry(id); + let parent_and_name = || { + let def_key = item.def_key.decode(self); + (self.local_def_id(def_key.parent.unwrap()), + def_key.disambiguated_data.data.get_opt_name().unwrap()) + }; -fn get_meta_items(md: rbml::Doc) -> Vec> { - reader::tagged_docs(md, tag_meta_item_word).map(|meta_item_doc| { - let nd = reader::get_doc(meta_item_doc, tag_meta_item_name); - let n = token::intern_and_get_ident(nd.as_str_slice()); - attr::mk_word_item(n) - }).chain(reader::tagged_docs(md, tag_meta_item_name_value).map(|meta_item_doc| { - let nd = reader::get_doc(meta_item_doc, 
tag_meta_item_name); - let vd = reader::get_doc(meta_item_doc, tag_meta_item_value); - let n = token::intern_and_get_ident(nd.as_str_slice()); - let v = token::intern_and_get_ident(vd.as_str_slice()); - // FIXME (#623): Should be able to decode MetaItemKind::NameValue variants, - // but currently the encoder just drops them - attr::mk_name_value_item_str(n, v) - })).chain(reader::tagged_docs(md, tag_meta_item_list).map(|meta_item_doc| { - let nd = reader::get_doc(meta_item_doc, tag_meta_item_name); - let n = token::intern_and_get_ident(nd.as_str_slice()); - let subitems = get_meta_items(meta_item_doc); - attr::mk_list_item(n, subitems) - })).collect() -} + Some(match item.kind { + EntryKind::AssociatedConst(container) => { + let (parent, name) = parent_and_name(); + ty::ConstTraitItem(Rc::new(ty::AssociatedConst { + name: name, + ty: item.ty.unwrap().decode((self, tcx)), + vis: item.visibility, + defaultness: container.defaultness(), + def_id: self.local_def_id(id), + container: container.with_def_id(parent), + has_value: container.has_body(), + })) + } + EntryKind::Method(data) => { + let (parent, name) = parent_and_name(); + let ity = item.ty.unwrap().decode((self, tcx)); + let fty = match ity.sty { + ty::TyFnDef(.., fty) => fty, + _ => bug!( + "the type {:?} of the method {:?} is not a function?", + ity, name) + }; + + let data = data.decode(self); + ty::MethodTraitItem(Rc::new(ty::Method { + name: name, + generics: tcx.lookup_generics(self.local_def_id(id)), + predicates: item.predicates.unwrap().decode((self, tcx)), + fty: fty, + explicit_self: data.explicit_self.decode((self, tcx)), + vis: item.visibility, + defaultness: data.container.defaultness(), + has_body: data.container.has_body(), + def_id: self.local_def_id(id), + container: data.container.with_def_id(parent), + })) + } + EntryKind::AssociatedType(container) => { + let (parent, name) = parent_and_name(); + ty::TypeTraitItem(Rc::new(ty::AssociatedType { + name: name, + ty: item.ty.map(|ty| ty.decode((self, tcx))), + vis: item.visibility, + defaultness: container.defaultness(), + def_id: self.local_def_id(id), + container: container.with_def_id(parent), + })) + } + _ => return None + }) + } -fn get_attributes(md: rbml::Doc) -> Vec { - match reader::maybe_get_doc(md, tag_attributes) { - Some(attrs_d) => { - reader::tagged_docs(attrs_d, tag_attribute).map(|attr_doc| { - let is_sugared_doc = reader::doc_as_u8( - reader::get_doc(attr_doc, tag_attribute_is_sugared_doc) - ) == 1; - let meta_items = get_meta_items(attr_doc); - // Currently it's only possible to have a single meta item on - // an attribute - assert_eq!(meta_items.len(), 1); - let meta_item = meta_items.into_iter().nth(0).unwrap(); - attr::mk_doc_attr_outer(attr::mk_attr_id(), meta_item, is_sugared_doc) - }).collect() - }, - None => vec![], + pub fn get_item_variances(&self, id: DefIndex) -> Vec { + self.entry(id).variances.decode(self).collect() } -} -fn list_crate_attributes(md: rbml::Doc, hash: &Svh, - out: &mut io::Write) -> io::Result<()> { - write!(out, "=Crate Attributes ({})=\n", *hash)?; + pub fn get_variant_kind(&self, node_id: DefIndex) -> Option { + match self.entry(node_id).kind { + EntryKind::Struct(data) | + EntryKind::Union(data) | + EntryKind::Variant(data) => Some(data.decode(self).kind), + _ => None + } + } - let r = get_attributes(md); - for attr in &r { - write!(out, "{}\n", pprust::attribute_to_string(attr))?; + pub fn get_struct_ctor_def_id(&self, node_id: DefIndex) -> Option { + match self.entry(node_id).kind { + EntryKind::Struct(data) => { + 
data.decode(self).struct_ctor.map(|index| self.local_def_id(index)) + } + _ => None + } } - write!(out, "\n\n") -} + pub fn get_item_attrs(&self, node_id: DefIndex) -> Vec { + // The attributes for a tuple struct are attached to the definition, not the ctor; + // we assume that someone passing in a tuple struct ctor is actually wanting to + // look at the definition + let mut item = self.entry(node_id); + let def_key = item.def_key.decode(self); + if def_key.disambiguated_data.data == DefPathData::StructCtor { + item = self.entry(def_key.parent.unwrap()); + } + self.get_attributes(&item) + } -pub fn get_crate_attributes(data: &[u8]) -> Vec { - get_attributes(rbml::Doc::new(data)) -} + pub fn get_struct_field_names(&self, id: DefIndex) -> Vec { + self.entry(id).children.decode(self).map(|index| { + self.item_name(&self.entry(index)) + }).collect() + } -#[derive(Clone)] -pub struct CrateDep { - pub cnum: ast::CrateNum, - pub name: String, - pub hash: Svh, - pub explicitly_linked: bool, -} + fn get_attributes(&self, item: &Entry<'tcx>) -> Vec { + item.attributes.decode(self).map(|mut attr| { + // Need new unique IDs: old thread-local IDs won't map to new threads. + attr.node.id = attr::mk_attr_id(); + attr + }).collect() + } -pub fn get_crate_deps(data: &[u8]) -> Vec { - let cratedoc = rbml::Doc::new(data); - let depsdoc = reader::get_doc(cratedoc, tag_crate_deps); - - fn docstr(doc: rbml::Doc, tag_: usize) -> String { - let d = reader::get_doc(doc, tag_); - d.as_str_slice().to_string() - } - - reader::tagged_docs(depsdoc, tag_crate_dep).enumerate().map(|(crate_num, depdoc)| { - let name = docstr(depdoc, tag_crate_dep_crate_name); - let hash = Svh::new(reader::doc_as_u64(reader::get_doc(depdoc, tag_crate_dep_hash))); - let doc = reader::get_doc(depdoc, tag_crate_dep_explicitly_linked); - let explicitly_linked = reader::doc_as_u8(doc) != 0; - CrateDep { - cnum: crate_num as u32 + 1, - name: name, - hash: hash, - explicitly_linked: explicitly_linked, + // Translate a DefId from the current compilation environment to a DefId + // for an external crate. + fn reverse_translate_def_id(&self, did: DefId) -> Option { + for (local, &global) in self.cnum_map.borrow().iter_enumerated() { + if global == did.krate { + return Some(DefId { krate: local, index: did.index }); + } } - }).collect() -} -fn list_crate_deps(data: &[u8], out: &mut io::Write) -> io::Result<()> { - write!(out, "=External Dependencies=\n")?; - for dep in &get_crate_deps(data) { - write!(out, "{} {}-{}\n", dep.cnum, dep.name, dep.hash)?; + None } - write!(out, "\n")?; - Ok(()) -} -pub fn maybe_get_crate_hash(data: &[u8]) -> Option { - let cratedoc = rbml::Doc::new(data); - reader::maybe_get_doc(cratedoc, tag_crate_hash).map(|doc| { - Svh::new(reader::doc_as_u64(doc)) - }) -} + pub fn get_inherent_implementations_for_type(&self, id: DefIndex) -> Vec { + self.entry(id).inherent_impls.decode(self).map(|index| { + self.local_def_id(index) + }).collect() + } -pub fn get_crate_hash(data: &[u8]) -> Svh { - let cratedoc = rbml::Doc::new(data); - let hashdoc = reader::get_doc(cratedoc, tag_crate_hash); - Svh::new(reader::doc_as_u64(hashdoc)) -} + pub fn get_implementations_for_trait(&self, filter: Option, result: &mut Vec) { + // Do a reverse lookup beforehand to avoid touching the crate_num + // hash map in the loop below. 
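reverse_translate_def_id above inverts the cnum_map table by a linear scan: cnum_map takes a crate number as it appears inside this crate's own metadata to the number assigned by the current compilation session, and the reverse lookup recovers the metadata-local number for a session crate number. A small sketch follows; CrateNum, DefId, and CrateMetadata here are simplified stand-ins, not the compiler's types.

type CrateNum = u32;

#[derive(Debug, PartialEq)]
struct DefId {
    krate: CrateNum,
    index: u32,
}

struct CrateMetadata {
    // Indexed by the crate number used inside this crate's own metadata.
    cnum_map: Vec<CrateNum>,
}

impl CrateMetadata {
    // Metadata-local crate number -> crate number in the current session.
    fn translate_def_id(&self, did: DefId) -> DefId {
        DefId { krate: self.cnum_map[did.krate as usize], index: did.index }
    }

    // Current-session crate number -> metadata-local crate number, by linear scan.
    fn reverse_translate_def_id(&self, did: DefId) -> Option<DefId> {
        for (local, &global) in self.cnum_map.iter().enumerate() {
            if global == did.krate {
                return Some(DefId { krate: local as CrateNum, index: did.index });
            }
        }
        None
    }
}

fn main() {
    let cdata = CrateMetadata { cnum_map: vec![7, 3, 9] };
    assert_eq!(cdata.translate_def_id(DefId { krate: 1, index: 42 }),
               DefId { krate: 3, index: 42 });
    assert_eq!(cdata.reverse_translate_def_id(DefId { krate: 3, index: 42 }),
               Some(DefId { krate: 1, index: 42 }));
}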
+ let filter = match filter.map(|def_id| self.reverse_translate_def_id(def_id)) { + Some(Some(def_id)) => Some((def_id.krate.as_u32(), def_id.index)), + Some(None) => return, + None => None + }; -pub fn maybe_get_crate_name(data: &[u8]) -> Option<&str> { - let cratedoc = rbml::Doc::new(data); - reader::maybe_get_doc(cratedoc, tag_crate_crate_name).map(|doc| { - doc.as_str_slice() - }) -} + // FIXME(eddyb) Make this O(1) instead of O(n). + for trait_impls in self.root.impls.decode(self) { + if filter.is_some() && filter != Some(trait_impls.trait_id) { + continue; + } -pub fn get_crate_disambiguator<'a>(data: &'a [u8]) -> &'a str { - let crate_doc = rbml::Doc::new(data); - let disambiguator_doc = reader::get_doc(crate_doc, tag_crate_disambiguator); - let slice: &'a str = disambiguator_doc.as_str_slice(); - slice -} + result.extend(trait_impls.impls.decode(self).map(|index| { + self.local_def_id(index) + })); -pub fn get_crate_triple(data: &[u8]) -> Option { - let cratedoc = rbml::Doc::new(data); - let triple_doc = reader::maybe_get_doc(cratedoc, tag_crate_triple); - triple_doc.map(|s| s.as_str().to_string()) -} + if filter.is_some() { + break; + } + } + } -pub fn get_crate_name(data: &[u8]) -> &str { - maybe_get_crate_name(data).expect("no crate name in crate") -} + pub fn get_trait_of_item(&self, id: DefIndex) -> Option { + self.entry(id).def_key.decode(self).parent.and_then(|parent_index| { + match self.entry(parent_index).kind { + EntryKind::Trait(_) => Some(self.local_def_id(parent_index)), + _ => None + } + }) + } -pub fn list_crate_metadata(bytes: &[u8], out: &mut io::Write) -> io::Result<()> { - let hash = get_crate_hash(bytes); - let md = rbml::Doc::new(bytes); - list_crate_attributes(md, &hash, out)?; - list_crate_deps(bytes, out) -} -// Translates a def_id from an external crate to a def_id for the current -// compilation environment. We use this when trying to load types from -// external crates - if those types further refer to types in other crates -// then we must translate the crate number from that encoded in the external -// crate to the correct local crate number. -pub fn translate_def_id(cdata: Cmd, did: DefId) -> DefId { - if did.is_local() { - return DefId { krate: cdata.cnum, index: did.index }; + pub fn get_native_libraries(&self) -> Vec<(NativeLibraryKind, String)> { + self.root.native_libraries.decode(self).collect() } - DefId { - krate: cdata.cnum_map.borrow()[did.krate], - index: did.index + pub fn get_dylib_dependency_formats(&self) -> Vec<(CrateNum, LinkagePreference)> { + self.root.dylib_dependency_formats.decode(self).enumerate().flat_map(|(i, link)| { + let cnum = CrateNum::new(i + 1); + link.map(|link| (self.cnum_map.borrow()[cnum], link)) + }).collect() } -} -// Translate a DefId from the current compilation environment to a DefId -// for an external crate. -fn reverse_translate_def_id(cdata: Cmd, did: DefId) -> Option { - for (local, &global) in cdata.cnum_map.borrow().iter_enumerated() { - if global == did.krate { - return Some(DefId { krate: local, index: did.index }); - } + pub fn get_missing_lang_items(&self) -> Vec { + self.root.lang_items_missing.decode(self).collect() } - None -} - -/// Translates a `Span` from an extern crate to the corresponding `Span` -/// within the local crate's codemap. 
-pub fn translate_span(cdata: Cmd, - codemap: &codemap::CodeMap, - last_filemap_index_hint: &Cell, - span: syntax_pos::Span) - -> syntax_pos::Span { - let span = if span.lo > span.hi { - // Currently macro expansion sometimes produces invalid Span values - // where lo > hi. In order not to crash the compiler when trying to - // translate these values, let's transform them into something we - // can handle (and which will produce useful debug locations at - // least some of the time). - // This workaround is only necessary as long as macro expansion is - // not fixed. FIXME(#23480) - syntax_pos::mk_sp(span.lo, span.lo) - } else { - span - }; - - let imported_filemaps = cdata.imported_filemaps(&codemap); - let filemap = { - // Optimize for the case that most spans within a translated item - // originate from the same filemap. - let last_filemap_index = last_filemap_index_hint.get(); - let last_filemap = &imported_filemaps[last_filemap_index]; - - if span.lo >= last_filemap.original_start_pos && - span.lo <= last_filemap.original_end_pos && - span.hi >= last_filemap.original_start_pos && - span.hi <= last_filemap.original_end_pos { - last_filemap - } else { - let mut a = 0; - let mut b = imported_filemaps.len(); + pub fn get_fn_arg_names(&self, id: DefIndex) -> Vec { + let arg_names = match self.entry(id).kind { + EntryKind::Fn(data) | + EntryKind::ForeignFn(data) => data.decode(self).arg_names, + EntryKind::Method(data) => data.decode(self).fn_data.arg_names, + _ => LazySeq::empty() + }; + arg_names.decode(self).collect() + } - while b - a > 1 { - let m = (a + b) / 2; - if imported_filemaps[m].original_start_pos > span.lo { - b = m; - } else { - a = m; - } - } + pub fn get_reachable_ids(&self) -> Vec { + self.root.reachable_ids.decode(self).map(|index| self.local_def_id(index)).collect() + } - last_filemap_index_hint.set(a); - &imported_filemaps[a] - } - }; + pub fn is_const_fn(&self, id: DefIndex) -> bool { + let constness = match self.entry(id).kind { + EntryKind::Method(data) => data.decode(self).fn_data.constness, + EntryKind::Fn(data) => data.decode(self).constness, + _ => hir::Constness::NotConst + }; + constness == hir::Constness::Const + } - let lo = (span.lo - filemap.original_start_pos) + - filemap.translated_filemap.start_pos; - let hi = (span.hi - filemap.original_start_pos) + - filemap.translated_filemap.start_pos; + pub fn is_extern_item(&self, id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> bool { + let item = match self.maybe_entry(id) { + Some(item) => item.decode(self), + None => return false, + }; + let applicable = match item.kind { + EntryKind::ImmStatic | + EntryKind::MutStatic | + EntryKind::ForeignImmStatic | + EntryKind::ForeignMutStatic => true, + + EntryKind::Fn(_) | EntryKind::ForeignFn(_) => { + self.get_generics(id, tcx).types.is_empty() + } - syntax_pos::mk_sp(lo, hi) -} + _ => false, + }; -pub fn each_inherent_implementation_for_type(cdata: Cmd, - id: DefIndex, - mut callback: F) - where F: FnMut(DefId), -{ - let item_doc = cdata.lookup_item(id); - for impl_doc in reader::tagged_docs(item_doc, tag_items_data_item_inherent_impl) { - if reader::maybe_get_doc(impl_doc, tag_item_trait_ref).is_none() { - callback(item_def_id(impl_doc, cdata)); + if applicable { + attr::contains_extern_indicator(tcx.sess.diagnostic(), + &self.get_attributes(&item)) + } else { + false } } -} -pub fn each_implementation_for_trait(cdata: Cmd, - def_id: DefId, - mut callback: F) where - F: FnMut(DefId), -{ - // Do a reverse lookup beforehand to avoid touching the crate_num - // hash map 
in the loop below. - if let Some(crate_local_did) = reverse_translate_def_id(cdata, def_id) { - let def_id_u64 = def_to_u64(crate_local_did); - - let impls_doc = reader::get_doc(rbml::Doc::new(cdata.data()), tag_impls); - for trait_doc in reader::tagged_docs(impls_doc, tag_impls_trait) { - let trait_def_id = reader::get_doc(trait_doc, tag_def_id); - if reader::doc_as_u64(trait_def_id) != def_id_u64 { - continue; - } - for impl_doc in reader::tagged_docs(trait_doc, tag_impls_trait_impl) { - callback(translated_def_id(cdata, impl_doc)); - } + pub fn is_foreign_item(&self, id: DefIndex) -> bool { + match self.entry(id).kind { + EntryKind::ForeignImmStatic | + EntryKind::ForeignMutStatic | + EntryKind::ForeignFn(_) => true, + _ => false } } -} -pub fn get_trait_of_item<'a, 'tcx>(cdata: Cmd, - id: DefIndex, - tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> Option { - let item_doc = cdata.lookup_item(id); - let parent_item_id = match item_parent_item(cdata, item_doc) { - None => return None, - Some(item_id) => item_id, - }; - let parent_item_doc = cdata.lookup_item(parent_item_id.index); - match item_family(parent_item_doc) { - Trait => Some(item_def_id(parent_item_doc, cdata)), - Impl | DefaultImpl => { - reader::maybe_get_doc(parent_item_doc, tag_item_trait_ref) - .map(|_| item_trait_ref(parent_item_doc, tcx, cdata).def_id) + pub fn is_defaulted_trait(&self, trait_id: DefIndex) -> bool { + match self.entry(trait_id).kind { + EntryKind::Trait(data) => data.decode(self).has_default_impl, + _ => bug!() } - _ => None } -} - -pub fn get_native_libraries(cdata: Cmd) - -> Vec<(cstore::NativeLibraryKind, String)> { - let libraries = reader::get_doc(rbml::Doc::new(cdata.data()), - tag_native_libraries); - reader::tagged_docs(libraries, tag_native_libraries_lib).map(|lib_doc| { - let kind_doc = reader::get_doc(lib_doc, tag_native_libraries_kind); - let name_doc = reader::get_doc(lib_doc, tag_native_libraries_name); - let kind: cstore::NativeLibraryKind = - cstore::NativeLibraryKind::from_u32(reader::doc_as_u32(kind_doc)).unwrap(); - let name = name_doc.as_str().to_string(); - (kind, name) - }).collect() -} - -pub fn get_plugin_registrar_fn(data: &[u8]) -> Option { - reader::maybe_get_doc(rbml::Doc::new(data), tag_plugin_registrar_fn) - .map(|doc| DefIndex::from_u32(reader::doc_as_u32(doc))) -} - -pub fn each_exported_macro(data: &[u8], mut f: F) where - F: FnMut(ast::Name, Vec, Span, String) -> bool, -{ - let macros = reader::get_doc(rbml::Doc::new(data), tag_macro_defs); - for macro_doc in reader::tagged_docs(macros, tag_macro_def) { - let name = item_name(macro_doc); - let attrs = get_attributes(macro_doc); - let span = get_macro_span(macro_doc); - let body = reader::get_doc(macro_doc, tag_macro_def_body); - if !f(name, attrs, span, body.as_str().to_string()) { - break; + pub fn is_default_impl(&self, impl_id: DefIndex) -> bool { + match self.entry(impl_id).kind { + EntryKind::DefaultImpl(_) => true, + _ => false } } -} - -pub fn get_macro_span(doc: rbml::Doc) -> Span { - let lo_doc = reader::get_doc(doc, tag_macro_def_span_lo); - let lo = BytePos(reader::doc_as_u32(lo_doc)); - let hi_doc = reader::get_doc(doc, tag_macro_def_span_hi); - let hi = BytePos(reader::doc_as_u32(hi_doc)); - return Span { lo: lo, hi: hi, expn_id: NO_EXPANSION }; -} - -pub fn get_dylib_dependency_formats(cdata: Cmd) - -> Vec<(ast::CrateNum, LinkagePreference)> -{ - let formats = reader::get_doc(rbml::Doc::new(cdata.data()), - tag_dylib_dependency_formats); - let mut result = Vec::new(); - - debug!("found dylib deps: {}", 
formats.as_str_slice()); - for spec in formats.as_str_slice().split(',') { - if spec.is_empty() { continue } - let cnum = spec.split(':').nth(0).unwrap(); - let link = spec.split(':').nth(1).unwrap(); - let cnum: ast::CrateNum = cnum.parse().unwrap(); - let cnum = cdata.cnum_map.borrow()[cnum]; - result.push((cnum, if link == "d" { - LinkagePreference::RequireDynamic - } else { - LinkagePreference::RequireStatic - })); - } - return result; -} -pub fn get_missing_lang_items(cdata: Cmd) - -> Vec -{ - let items = reader::get_doc(rbml::Doc::new(cdata.data()), tag_lang_items); - reader::tagged_docs(items, tag_lang_items_missing).map(|missing_docs| { - lang_items::LangItem::from_u32(reader::doc_as_u32(missing_docs)).unwrap() - }).collect() -} - -pub fn get_method_arg_names(cdata: Cmd, id: DefIndex) -> Vec { - let method_doc = cdata.lookup_item(id); - match reader::maybe_get_doc(method_doc, tag_method_argument_names) { - Some(args_doc) => { - reader::tagged_docs(args_doc, tag_method_argument_name).map(|name_doc| { - name_doc.as_str_slice().to_string() - }).collect() - }, - None => vec![], + pub fn closure_kind(&self, closure_id: DefIndex) -> ty::ClosureKind { + match self.entry(closure_id).kind { + EntryKind::Closure(data) => data.decode(self).kind, + _ => bug!() + } } -} -pub fn get_reachable_ids(cdata: Cmd) -> Vec { - let items = reader::get_doc(rbml::Doc::new(cdata.data()), - tag_reachable_ids); - reader::tagged_docs(items, tag_reachable_id).map(|doc| { - DefId { - krate: cdata.cnum, - index: DefIndex::from_u32(reader::doc_as_u32(doc)), + pub fn closure_ty(&self, closure_id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> ty::ClosureTy<'tcx> { + match self.entry(closure_id).kind { + EntryKind::Closure(data) => data.decode(self).ty.decode((self, tcx)), + _ => bug!() } - }).collect() -} - -pub fn is_typedef(cdata: Cmd, id: DefIndex) -> bool { - let item_doc = cdata.lookup_item(id); - match item_family(item_doc) { - Type => true, - _ => false, } -} -pub fn is_const_fn(cdata: Cmd, id: DefIndex) -> bool { - let item_doc = cdata.lookup_item(id); - match fn_constness(item_doc) { - hir::Constness::Const => true, - hir::Constness::NotConst => false, + pub fn def_key(&self, id: DefIndex) -> hir_map::DefKey { + debug!("def_key: id={:?}", id); + self.entry(id).def_key.decode(self) } -} -pub fn is_extern_item<'a, 'tcx>(cdata: Cmd, - id: DefIndex, - tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> bool { - let item_doc = match cdata.get_item(id) { - Some(doc) => doc, - None => return false, - }; - let applicable = match item_family(item_doc) { - ImmStatic | MutStatic => true, - Fn => { - let ty::TypeScheme { generics, .. } = get_type(cdata, id, tcx); - let no_generics = generics.types.is_empty(); - no_generics - }, - _ => false, - }; - - if applicable { - attr::contains_extern_indicator(tcx.sess.diagnostic(), - &get_attributes(item_doc)) - } else { - false + // Returns the path leading to the thing with this `id`. 
Note that + // some def-ids don't wind up in the metadata, so `def_path` sometimes + // returns `None` + pub fn def_path(&self, id: DefIndex) -> Option { + debug!("def_path(id={:?})", id); + if self.maybe_entry(id).is_some() { + Some(hir_map::DefPath::make(self.cnum, id, |parent| self.def_key(parent))) + } else { + None + } } -} -pub fn is_foreign_item(cdata: Cmd, id: DefIndex) -> bool { - let item_doc = cdata.lookup_item(id); - let parent_item_id = match item_parent_item(cdata, item_doc) { - None => return false, - Some(item_id) => item_id, - }; - let parent_item_doc = cdata.lookup_item(parent_item_id.index); - item_family(parent_item_doc) == ForeignMod -} - -pub fn is_impl(cdata: Cmd, id: DefIndex) -> bool { - let item_doc = cdata.lookup_item(id); - match item_family(item_doc) { - Impl => true, - _ => false, - } -} + /// Imports the codemap from an external crate into the codemap of the crate + /// currently being compiled (the "local crate"). + /// + /// The import algorithm works analogous to how AST items are inlined from an + /// external crate's metadata: + /// For every FileMap in the external codemap an 'inline' copy is created in the + /// local codemap. The correspondence relation between external and local + /// FileMaps is recorded in the `ImportedFileMap` objects returned from this + /// function. When an item from an external crate is later inlined into this + /// crate, this correspondence information is used to translate the span + /// information of the inlined item so that it refers the correct positions in + /// the local codemap (see `>`). + /// + /// The import algorithm in the function below will reuse FileMaps already + /// existing in the local codemap. For example, even if the FileMap of some + /// source file of libstd gets imported many times, there will only ever be + /// one FileMap object for the corresponding file in the local codemap. + /// + /// Note that imported FileMaps do not actually contain the source code of the + /// file they represent, just information about length, line breaks, and + /// multibyte characters. This information is enough to generate valid debuginfo + /// for items inlined from other crates. + pub fn imported_filemaps(&'a self, local_codemap: &codemap::CodeMap) + -> Ref<'a, Vec> { + { + let filemaps = self.codemap_import_info.borrow(); + if !filemaps.is_empty() { + return filemaps; + } + } -fn doc_generics<'a, 'tcx>(base_doc: rbml::Doc, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - cdata: Cmd, - tag: usize) - -> ty::Generics<'tcx> -{ - let doc = reader::get_doc(base_doc, tag); - - let mut types = subst::VecPerParamSpace::empty(); - for p in reader::tagged_docs(doc, tag_type_param_def) { - let bd = - TyDecoder::with_doc(tcx, cdata.cnum, p, - &mut |did| translate_def_id(cdata, did)) - .parse_type_param_def(); - types.push(bd.space, bd); - } - - let mut regions = subst::VecPerParamSpace::empty(); - for p in reader::tagged_docs(doc, tag_region_param_def) { - let bd = - TyDecoder::with_doc(tcx, cdata.cnum, p, - &mut |did| translate_def_id(cdata, did)) - .parse_region_param_def(); - regions.push(bd.space, bd); - } - - ty::Generics { types: types, regions: regions } -} + let external_codemap = self.root.codemap.decode(self); + + let imported_filemaps = external_codemap.map(|filemap_to_import| { + // Try to find an existing FileMap that can be reused for the filemap to + // be imported. A FileMap is reusable if it is exactly the same, just + // positioned at a different offset within the codemap. 
+ let reusable_filemap = { + local_codemap.files + .borrow() + .iter() + .find(|fm| are_equal_modulo_startpos(&fm, &filemap_to_import)) + .map(|rc| rc.clone()) + }; -fn doc_predicate<'a, 'tcx>(cdata: Cmd, - doc: rbml::Doc, - tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> ty::Predicate<'tcx> -{ - let predicate_pos = cdata.xref_index.lookup( - cdata.data(), reader::doc_as_u32(doc)).unwrap() as usize; - TyDecoder::new( - cdata.data(), cdata.cnum, predicate_pos, tcx, - &mut |did| translate_def_id(cdata, did) - ).parse_predicate() -} + match reusable_filemap { + Some(fm) => { + cstore::ImportedFileMap { + original_start_pos: filemap_to_import.start_pos, + original_end_pos: filemap_to_import.end_pos, + translated_filemap: fm + } + } + None => { + // We can't reuse an existing FileMap, so allocate a new one + // containing the information we need. + let syntax_pos::FileMap { + name, + abs_path, + start_pos, + end_pos, + lines, + multibyte_chars, + .. + } = filemap_to_import; + + let source_length = (end_pos - start_pos).to_usize(); + + // Translate line-start positions and multibyte character + // position into frame of reference local to file. + // `CodeMap::new_imported_filemap()` will then translate those + // coordinates to their new global frame of reference when the + // offset of the FileMap is known. + let mut lines = lines.into_inner(); + for pos in &mut lines { + *pos = *pos - start_pos; + } + let mut multibyte_chars = multibyte_chars.into_inner(); + for mbc in &mut multibyte_chars { + mbc.pos = mbc.pos - start_pos; + } -fn doc_predicates<'a, 'tcx>(base_doc: rbml::Doc, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - cdata: Cmd, - tag: usize) - -> ty::GenericPredicates<'tcx> -{ - let doc = reader::get_doc(base_doc, tag); + let local_version = local_codemap.new_imported_filemap(name, + abs_path, + source_length, + lines, + multibyte_chars); + cstore::ImportedFileMap { + original_start_pos: start_pos, + original_end_pos: end_pos, + translated_filemap: local_version + } + } + } + }).collect(); - let mut predicates = subst::VecPerParamSpace::empty(); - for predicate_doc in reader::tagged_docs(doc, tag_type_predicate) { - predicates.push(subst::TypeSpace, - doc_predicate(cdata, predicate_doc, tcx)); - } - for predicate_doc in reader::tagged_docs(doc, tag_self_predicate) { - predicates.push(subst::SelfSpace, - doc_predicate(cdata, predicate_doc, tcx)); + // This shouldn't borrow twice, but there is no way to downgrade RefMut to Ref. 
+ *self.codemap_import_info.borrow_mut() = imported_filemaps; + self.codemap_import_info.borrow() } - for predicate_doc in reader::tagged_docs(doc, tag_fn_predicate) { - predicates.push(subst::FnSpace, - doc_predicate(cdata, predicate_doc, tcx)); - } - - ty::GenericPredicates { predicates: predicates } } -pub fn is_defaulted_trait(cdata: Cmd, trait_id: DefIndex) -> bool { - let trait_doc = cdata.lookup_item(trait_id); - assert!(item_family(trait_doc) == Family::Trait); - let defaulted_doc = reader::get_doc(trait_doc, tag_defaulted_trait); - reader::doc_as_u8(defaulted_doc) != 0 -} +fn are_equal_modulo_startpos(fm1: &syntax_pos::FileMap, fm2: &syntax_pos::FileMap) -> bool { + if fm1.byte_length() != fm2.byte_length() { + return false; + } -pub fn is_default_impl(cdata: Cmd, impl_id: DefIndex) -> bool { - let impl_doc = cdata.lookup_item(impl_id); - item_family(impl_doc) == Family::DefaultImpl -} + if fm1.name != fm2.name { + return false; + } -pub fn get_imported_filemaps(metadata: &[u8]) -> Vec { - let crate_doc = rbml::Doc::new(metadata); - let cm_doc = reader::get_doc(crate_doc, tag_codemap); + let lines1 = fm1.lines.borrow(); + let lines2 = fm2.lines.borrow(); - reader::tagged_docs(cm_doc, tag_codemap_filemap).map(|filemap_doc| { - let mut decoder = reader::Decoder::new(filemap_doc); - decoder.read_opaque(|opaque_decoder, _| { - Decodable::decode(opaque_decoder) - }).unwrap() - }).collect() -} + if lines1.len() != lines2.len() { + return false; + } -pub fn closure_kind(cdata: Cmd, closure_id: DefIndex) -> ty::ClosureKind { - let closure_doc = cdata.lookup_item(closure_id); - let closure_kind_doc = reader::get_doc(closure_doc, tag_items_closure_kind); - let mut decoder = reader::Decoder::new(closure_kind_doc); - ty::ClosureKind::decode(&mut decoder).unwrap() -} + for (&line1, &line2) in lines1.iter().zip(lines2.iter()) { + if (line1 - fm1.start_pos) != (line2 - fm2.start_pos) { + return false; + } + } -pub fn closure_ty<'a, 'tcx>(cdata: Cmd, closure_id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> ty::ClosureTy<'tcx> { - let closure_doc = cdata.lookup_item(closure_id); - let closure_ty_doc = reader::get_doc(closure_doc, tag_items_closure_ty); - TyDecoder::with_doc(tcx, cdata.cnum, closure_ty_doc, &mut |did| translate_def_id(cdata, did)) - .parse_closure_ty() -} + let multibytes1 = fm1.multibyte_chars.borrow(); + let multibytes2 = fm2.multibyte_chars.borrow(); -pub fn def_key(cdata: Cmd, id: DefIndex) -> hir_map::DefKey { - debug!("def_key: id={:?}", id); - let item_doc = cdata.lookup_item(id); - item_def_key(item_doc) -} + if multibytes1.len() != multibytes2.len() { + return false; + } -fn item_def_key(item_doc: rbml::Doc) -> hir_map::DefKey { - match reader::maybe_get_doc(item_doc, tag_def_key) { - Some(def_key_doc) => { - let mut decoder = reader::Decoder::new(def_key_doc); - let simple_key = def_key::DefKey::decode(&mut decoder).unwrap(); - let name = reader::maybe_get_doc(item_doc, tag_paths_data_name).map(|name| { - token::intern(name.as_str_slice()).as_str() - }); - def_key::recover_def_key(simple_key, name) - } - None => { - bug!("failed to find block with tag {:?} for item with family {:?}", - tag_def_key, - item_family(item_doc)) + for (mb1, mb2) in multibytes1.iter().zip(multibytes2.iter()) { + if (mb1.bytes != mb2.bytes) || + ((mb1.pos - fm1.start_pos) != (mb2.pos - fm2.start_pos)) { + return false; } } -} -pub fn def_path(cdata: Cmd, id: DefIndex) -> hir_map::DefPath { - debug!("def_path(id={:?})", id); - hir_map::DefPath::make(cdata.cnum, id, |parent| def_key(cdata, parent)) 
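// --- Editor's note: illustrative sketch, not part of the patch ---
// The `ImportedFileMap` correspondence set up above is consumed with simple
// offset arithmetic: a byte position keeps its distance from the start of its
// FileMap and only the base changes, exactly as in the removed `translate_span`
// and in the line/multibyte-char rebasing above. A minimal standalone sketch,
// with plain u32s standing in for BytePos (an assumption made for brevity):
fn rebase_pos(pos: u32, original_start_pos: u32, translated_start_pos: u32) -> u32 {
    // subtract the external FileMap's base, add the locally imported one's base
    (pos - original_start_pos) + translated_start_pos
}
// --- end editor's note ---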
-} - -pub fn get_panic_strategy(data: &[u8]) -> PanicStrategy { - let crate_doc = rbml::Doc::new(data); - let strat_doc = reader::get_doc(crate_doc, tag_panic_strategy); - match reader::doc_as_u8(strat_doc) { - b'U' => PanicStrategy::Unwind, - b'A' => PanicStrategy::Abort, - b => panic!("unknown panic strategy in metadata: {}", b), - } + true } diff --git a/src/librustc_metadata/def_key.rs b/src/librustc_metadata/def_key.rs deleted file mode 100644 index 285ca2e4d4..0000000000 --- a/src/librustc_metadata/def_key.rs +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::hir::def_id::DefIndex; -use rustc::hir::map as hir_map; -use syntax::parse::token::InternedString; - -#[derive(RustcEncodable, RustcDecodable)] -pub struct DefKey { - pub parent: Option, - pub disambiguated_data: DisambiguatedDefPathData, -} - -#[derive(RustcEncodable, RustcDecodable)] -pub struct DisambiguatedDefPathData { - pub data: DefPathData, - pub disambiguator: u32, -} - -#[derive(RustcEncodable, RustcDecodable)] -pub enum DefPathData { - CrateRoot, - Misc, - Impl, - TypeNs, - ValueNs, - Module, - MacroDef, - ClosureExpr, - TypeParam, - LifetimeDef, - EnumVariant, - Field, - StructCtor, - Initializer, - Binding, - ImplTrait, -} - -pub fn simplify_def_key(key: hir_map::DefKey) -> DefKey { - let data = DisambiguatedDefPathData { - data: simplify_def_path_data(key.disambiguated_data.data), - disambiguator: key.disambiguated_data.disambiguator, - }; - DefKey { - parent: key.parent, - disambiguated_data: data, - } -} - -fn simplify_def_path_data(data: hir_map::DefPathData) -> DefPathData { - match data { - hir_map::DefPathData::CrateRoot => DefPathData::CrateRoot, - hir_map::DefPathData::InlinedRoot(_) => bug!("unexpected DefPathData"), - hir_map::DefPathData::Misc => DefPathData::Misc, - hir_map::DefPathData::Impl => DefPathData::Impl, - hir_map::DefPathData::TypeNs(_) => DefPathData::TypeNs, - hir_map::DefPathData::ValueNs(_) => DefPathData::ValueNs, - hir_map::DefPathData::Module(_) => DefPathData::Module, - hir_map::DefPathData::MacroDef(_) => DefPathData::MacroDef, - hir_map::DefPathData::ClosureExpr => DefPathData::ClosureExpr, - hir_map::DefPathData::TypeParam(_) => DefPathData::TypeParam, - hir_map::DefPathData::LifetimeDef(_) => DefPathData::LifetimeDef, - hir_map::DefPathData::EnumVariant(_) => DefPathData::EnumVariant, - hir_map::DefPathData::Field(_) => DefPathData::Field, - hir_map::DefPathData::StructCtor => DefPathData::StructCtor, - hir_map::DefPathData::Initializer => DefPathData::Initializer, - hir_map::DefPathData::Binding(_) => DefPathData::Binding, - hir_map::DefPathData::ImplTrait => DefPathData::ImplTrait, - } -} - -pub fn recover_def_key(key: DefKey, name: Option) -> hir_map::DefKey { - let data = hir_map::DisambiguatedDefPathData { - data: recover_def_path_data(key.disambiguated_data.data, name), - disambiguator: key.disambiguated_data.disambiguator, - }; - hir_map::DefKey { - parent: key.parent, - disambiguated_data: data, - } -} - -fn recover_def_path_data(data: DefPathData, name: Option) -> hir_map::DefPathData { - match data { - DefPathData::CrateRoot => hir_map::DefPathData::CrateRoot, - DefPathData::Misc => hir_map::DefPathData::Misc, - 
DefPathData::Impl => hir_map::DefPathData::Impl, - DefPathData::TypeNs => hir_map::DefPathData::TypeNs(name.unwrap()), - DefPathData::ValueNs => hir_map::DefPathData::ValueNs(name.unwrap()), - DefPathData::Module => hir_map::DefPathData::Module(name.unwrap()), - DefPathData::MacroDef => hir_map::DefPathData::MacroDef(name.unwrap()), - DefPathData::ClosureExpr => hir_map::DefPathData::ClosureExpr, - DefPathData::TypeParam => hir_map::DefPathData::TypeParam(name.unwrap()), - DefPathData::LifetimeDef => hir_map::DefPathData::LifetimeDef(name.unwrap()), - DefPathData::EnumVariant => hir_map::DefPathData::EnumVariant(name.unwrap()), - DefPathData::Field => hir_map::DefPathData::Field(name.unwrap()), - DefPathData::StructCtor => hir_map::DefPathData::StructCtor, - DefPathData::Initializer => hir_map::DefPathData::Initializer, - DefPathData::Binding => hir_map::DefPathData::Binding(name.unwrap()), - DefPathData::ImplTrait => hir_map::DefPathData::ImplTrait, - } -} diff --git a/src/librustc_metadata/diagnostics.rs b/src/librustc_metadata/diagnostics.rs index ae9f500c5d..f52e1437ac 100644 --- a/src/librustc_metadata/diagnostics.rs +++ b/src/librustc_metadata/diagnostics.rs @@ -14,14 +14,14 @@ register_long_diagnostics! { E0454: r##" A link name was given with an empty name. Erroneous code example: -``` +```compile_fail,E0454 #[link(name = "")] extern {} // error: #[link(name = "")] given with empty name ``` The rust compiler cannot link to an external library if you don't give it its name. Example: -``` +```ignore #[link(name = "some_lib")] extern {} // ok! ``` "##, @@ -32,8 +32,8 @@ as frameworks are specific to that operating system. Erroneous code example: -```compile_fail" -#[link(name = "FooCoreServices", kind = "framework")] extern {} +```ignore +#[link(name = "FooCoreServices", kind = "framework")] extern {} // OS used to compile is Linux for example ``` @@ -50,7 +50,7 @@ See more: https://doc.rust-lang.org/book/conditional-compilation.html E0458: r##" An unknown "kind" was specified for a link attribute. Erroneous code example: -``` +```compile_fail,E0458 #[link(kind = "wonderful_unicorn")] extern {} // error: unknown kind: `wonderful_unicorn` ``` @@ -64,7 +64,7 @@ Please specify a valid "kind" value, from one of the following: E0459: r##" A link was used without a name parameter. Erroneous code example: -``` +```compile_fail,E0459 #[link(kind = "dylib")] extern {} // error: #[link(...)] specified without `name = "foo"` ``` @@ -72,7 +72,7 @@ A link was used without a name parameter. Erroneous code example: Please add the name parameter to allow the rust compiler to find the library you want. Example: -``` +```ignore #[link(kind = "dylib", name = "some_lib")] extern {} // ok! ``` "##, @@ -80,7 +80,7 @@ you want. Example: E0463: r##" A plugin/crate was declared but cannot be found. Erroneous code example: -``` +```compile_fail,E0463 #![feature(plugin)] #![plugin(cookie_monster)] // error: can't find crate for `cookie_monster` extern crate cake_is_a_lie; // error: can't find crate for `cake_is_a_lie` @@ -91,6 +91,185 @@ You need to link your code to the relevant crate in order to be able to use it well, and you link to them the same way. "##, +E0466: r##" +Macro import declarations were malformed. 
+ +Erroneous code examples: + +```compile_fail,E0466 +#[macro_use(a_macro(another_macro))] // error: invalid import declaration +extern crate some_crate; + +#[macro_use(i_want = "some_macros")] // error: invalid import declaration +extern crate another_crate; +``` + +This is a syntax error at the level of attribute declarations. The proper +syntax for macro imports is the following: + +```ignore +// In some_crate: +#[macro_export] +macro_rules! get_tacos { + ... +} + +#[macro_export] +macro_rules! get_pimientos { + ... +} + +// In your crate: +#[macro_use(get_tacos, get_pimientos)] // It imports `get_tacos` and +extern crate some_crate; // `get_pimientos` macros from some_crate +``` + +If you would like to import all exported macros, write `macro_use` with no +arguments. +"##, + +E0467: r##" +Macro reexport declarations were empty or malformed. + +Erroneous code examples: + +```compile_fail,E0467 +#[macro_reexport] // error: no macros listed for export +extern crate macros_for_good; + +#[macro_reexport(fun_macro = "foo")] // error: not a macro identifier +extern crate other_macros_for_good; +``` + +This is a syntax error at the level of attribute declarations. + +Currently, `macro_reexport` requires at least one macro name to be listed. +Unlike `macro_use`, listing no names does not reexport all macros from the +given crate. + +Decide which macros you would like to export and list them properly. + +These are proper reexport declarations: + +```ignore +#[macro_reexport(some_macro, another_macro)] +extern crate macros_for_good; +``` +"##, + +E0468: r##" +A non-root module attempts to import macros from another crate. + +Example of erroneous code: + +```compile_fail,E0468 +mod foo { + #[macro_use(helpful_macro)] // error: must be at crate root to import + extern crate some_crate; // macros from another crate + helpful_macro!(...) +} +``` + +Only `extern crate` imports at the crate root level are allowed to import +macros. + +Either move the macro import to crate root or do without the foreign macros. +This will work: + +```ignore +#[macro_use(helpful_macro)] +extern crate some_crate; + +mod foo { + helpful_macro!(...) +} +``` +"##, + +E0469: r##" +A macro listed for import was not found. + +Erroneous code example: + +```compile_fail,E0469 +#[macro_use(drink, be_merry)] // error: imported macro not found +extern crate collections; + +fn main() { + // ... +} +``` + +Either the listed macro is not contained in the imported crate, or it is not +exported from the given crate. + +This could be caused by a typo. Did you misspell the macro's name? + +Double-check the names of the macros listed for import, and that the crate +in question exports them. + +A working version would be: + +```ignore +// In some_crate crate: +#[macro_export] +macro_rules! eat { + ... +} + +#[macro_export] +macro_rules! drink { + ... +} + +// In your crate: +#[macro_use(eat, drink)] +extern crate some_crate; //ok! +``` +"##, + +E0470: r##" +A macro listed for reexport was not found. + +Erroneous code example: + +```compile_fail,E0470 +#[macro_reexport(drink, be_merry)] +extern crate collections; + +fn main() { + // ... +} +``` + +Either the listed macro is not contained in the imported crate, or it is not +exported from the given crate. + +This could be caused by a typo. Did you misspell the macro's name? + +Double-check the names of the macros listed for reexport, and that the crate +in question exports them. + +A working version: + +```ignore +// In some_crate crate: +#[macro_export] +macro_rules! eat { + ... 
+} + +#[macro_export] +macro_rules! drink { + ... +} + +// In your_crate: +#[macro_reexport(eat, drink)] +extern crate some_crate; +``` +"##, + } register_diagnostics! { @@ -102,11 +281,6 @@ register_diagnostics! { E0462, // found staticlib `..` instead of rlib or dylib E0464, // multiple matching crates for `..` E0465, // multiple .. candidates for `..` found - E0466, // bad macro import - E0467, // bad macro reexport - E0468, // an `extern crate` loading macros must be at the crate root - E0469, // imported macro not found - E0470, // reexported macro not found E0519, // local crate and dependency have same (crate-name, disambiguator) E0523, // two dependencies have same (crate-name, disambiguator) but different SVH } diff --git a/src/librustc_metadata/encoder.rs b/src/librustc_metadata/encoder.rs index cc1d07b33c..0f067270b8 100644 --- a/src/librustc_metadata/encoder.rs +++ b/src/librustc_metadata/encoder.rs @@ -8,1670 +8,1174 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// Metadata encoding - -#![allow(unused_must_use)] // everything is just a MemWriter, can't fail -#![allow(non_camel_case_types)] - -use astencode::encode_inlined_item; -use common::*; use cstore; -use decoder; -use def_key; -use tyencode; -use index::{self, IndexData}; +use index::Index; +use schema::*; -use middle::cstore::{LOCAL_CRATE, InlinedItemRef, LinkMeta, tls}; +use rustc::middle::cstore::{InlinedItemRef, LinkMeta}; +use rustc::middle::cstore::{LinkagePreference, NativeLibraryKind}; use rustc::hir::def; -use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId}; -use middle::dependency_format::Linkage; -use rustc::dep_graph::{DepGraph, DepNode, DepTask}; -use rustc::ty::subst; +use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefIndex, DefId}; +use rustc::middle::dependency_format::Linkage; +use rustc::middle::lang_items; +use rustc::mir; use rustc::traits::specialization_graph; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::util::IntTypeExt; -use rustc::hir::svh::Svh; use rustc::mir::mir_map::MirMap; -use rustc::session::config::{self, PanicStrategy}; +use rustc::session::config::{self, CrateTypeRustcMacro}; use rustc::util::nodemap::{FnvHashMap, NodeSet}; -use rustc_serialize::Encodable; -use std::cell::RefCell; +use rustc_serialize::{Encodable, Encoder, SpecializedEncoder, opaque}; +use std::hash::Hash; +use std::intrinsics; use std::io::prelude::*; -use std::io::{Cursor, SeekFrom}; +use std::io::Cursor; use std::rc::Rc; use std::u32; -use syntax::abi::Abi; -use syntax::ast::{self, NodeId, Name, CRATE_NODE_ID, CrateNum}; -use syntax::attr::{self,AttrMetaMethods,AttributeMethods}; -use errors::Handler; +use syntax::ast::{self, CRATE_NODE_ID}; +use syntax::attr; use syntax; -use syntax_pos::BytePos; -use rbml::writer::Encoder; +use syntax_pos; use rustc::hir::{self, PatKind}; use rustc::hir::intravisit::Visitor; use rustc::hir::intravisit; -use rustc::hir::map::DefKey; + +use super::index_builder::{FromId, IndexBuilder, Untracked}; pub struct EncodeContext<'a, 'tcx: 'a> { - pub diag: &'a Handler, + opaque: opaque::Encoder<'a>, pub tcx: TyCtxt<'a, 'tcx, 'tcx>, - pub reexports: &'a def::ExportMap, - pub link_meta: &'a LinkMeta, - pub cstore: &'a cstore::CStore, - pub type_abbrevs: tyencode::abbrev_map<'tcx>, - pub reachable: &'a NodeSet, - pub mir_map: &'a MirMap<'tcx>, -} - -impl<'a, 'tcx> EncodeContext<'a,'tcx> { - fn local_id(&self, def_id: DefId) -> NodeId { - self.tcx.map.as_local_node_id(def_id).unwrap() + reexports: &'a def::ExportMap, + link_meta: 
&'a LinkMeta, + cstore: &'a cstore::CStore, + reachable: &'a NodeSet, + mir_map: &'a MirMap<'tcx>, + + lazy_state: LazyState, + type_shorthands: FnvHashMap, usize>, + predicate_shorthands: FnvHashMap, usize>, +} + +macro_rules! encoder_methods { + ($($name:ident($ty:ty);)*) => { + $(fn $name(&mut self, value: $ty) -> Result<(), Self::Error> { + self.opaque.$name(value) + })* } } -/// "interned" entries referenced by id -#[derive(PartialEq, Eq, Hash)] -pub enum XRef<'tcx> { Predicate(ty::Predicate<'tcx>) } +impl<'a, 'tcx> Encoder for EncodeContext<'a, 'tcx> { + type Error = as Encoder>::Error; -struct CrateIndex<'a, 'tcx> { - dep_graph: &'a DepGraph, - items: IndexData, - xrefs: FnvHashMap, u32>, // sequentially-assigned -} - -impl<'a, 'tcx> CrateIndex<'a, 'tcx> { - /// Records that `id` is being emitted at the current offset. - /// This data is later used to construct the item index in the - /// metadata so we can quickly find the data for a given item. - /// - /// Returns a dep-graph task that you should keep live as long as - /// the data for this item is being emitted. - fn record(&mut self, id: DefId, rbml_w: &mut Encoder) -> DepTask<'a> { - let position = rbml_w.mark_stable_position(); - self.items.record(id, position); - self.dep_graph.in_task(DepNode::MetaData(id)) + fn emit_nil(&mut self) -> Result<(), Self::Error> { + Ok(()) } - fn add_xref(&mut self, xref: XRef<'tcx>) -> u32 { - let old_len = self.xrefs.len() as u32; - *self.xrefs.entry(xref).or_insert(old_len) + encoder_methods! { + emit_usize(usize); + emit_u64(u64); + emit_u32(u32); + emit_u16(u16); + emit_u8(u8); + + emit_isize(isize); + emit_i64(i64); + emit_i32(i32); + emit_i16(i16); + emit_i8(i8); + + emit_bool(bool); + emit_f64(f64); + emit_f32(f32); + emit_char(char); + emit_str(&str); } } -fn encode_name(rbml_w: &mut Encoder, name: Name) { - rbml_w.wr_tagged_str(tag_paths_data_name, &name.as_str()); -} - -fn encode_def_id(rbml_w: &mut Encoder, id: DefId) { - rbml_w.wr_tagged_u64(tag_def_id, def_to_u64(id)); -} - -fn encode_def_key(rbml_w: &mut Encoder, key: DefKey) { - let simple_key = def_key::simplify_def_key(key); - rbml_w.start_tag(tag_def_key); - simple_key.encode(rbml_w); - rbml_w.end_tag(); -} - -/// For every DefId that we create a metadata item for, we include a -/// serialized copy of its DefKey, which allows us to recreate a path. 
-fn encode_def_id_and_key(ecx: &EncodeContext, - rbml_w: &mut Encoder, - def_id: DefId) -{ - encode_def_id(rbml_w, def_id); - let def_key = ecx.tcx.map.def_key(def_id); - encode_def_key(rbml_w, def_key); -} - -fn encode_trait_ref<'a, 'tcx>(rbml_w: &mut Encoder, - ecx: &EncodeContext<'a, 'tcx>, - trait_ref: ty::TraitRef<'tcx>, - tag: usize) { - rbml_w.start_tag(tag); - tyencode::enc_trait_ref(rbml_w.writer, &ecx.ty_str_ctxt(), trait_ref); - rbml_w.mark_stable_position(); - rbml_w.end_tag(); +impl<'a, 'tcx, T> SpecializedEncoder> for EncodeContext<'a, 'tcx> { + fn specialized_encode(&mut self, lazy: &Lazy) -> Result<(), Self::Error> { + self.emit_lazy_distance(lazy.position, Lazy::::min_size()) + } } -// Item info table encoding -fn encode_family(rbml_w: &mut Encoder, c: char) { - rbml_w.wr_tagged_u8(tag_items_data_item_family, c as u8); +impl<'a, 'tcx, T> SpecializedEncoder> for EncodeContext<'a, 'tcx> { + fn specialized_encode(&mut self, seq: &LazySeq) -> Result<(), Self::Error> { + self.emit_usize(seq.len)?; + if seq.len == 0 { + return Ok(()); + } + self.emit_lazy_distance(seq.position, LazySeq::::min_size(seq.len)) + } } -pub fn def_to_u64(did: DefId) -> u64 { - assert!(did.index.as_u32() < u32::MAX); - (did.krate as u64) << 32 | (did.index.as_usize() as u64) +impl<'a, 'tcx> SpecializedEncoder> for EncodeContext<'a, 'tcx> { + fn specialized_encode(&mut self, ty: &Ty<'tcx>) -> Result<(), Self::Error> { + self.encode_with_shorthand(ty, &ty.sty, |ecx| &mut ecx.type_shorthands) + } } -pub fn def_to_string(_tcx: TyCtxt, did: DefId) -> String { - format!("{}:{}", did.krate, did.index.as_usize()) +impl<'a, 'tcx> SpecializedEncoder> for EncodeContext<'a, 'tcx> { + fn specialized_encode(&mut self, predicates: &ty::GenericPredicates<'tcx>) + -> Result<(), Self::Error> { + predicates.parent.encode(self)?; + predicates.predicates.len().encode(self)?; + for predicate in &predicates.predicates { + self.encode_with_shorthand(predicate, predicate, |ecx| &mut ecx.predicate_shorthands)? 
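// --- Editor's note: illustrative sketch, not part of the patch ---
// The `SpecializedEncoder` impls above store a `Lazy<T>`/`LazySeq<T>` as a
// *relative* distance rather than an absolute byte offset (computed by
// `emit_lazy_distance` below), so most references serialize as a small
// LEB128 integer. A hypothetical round-trip of the "previous entry" case,
// with plain usizes standing in for the real types (an assumption for brevity):
fn distance_from_previous(prev_min_end: usize, position: usize) -> usize {
    // forward gap since the end of the previously emitted lazy value;
    // the other case in the patch measures backwards from the node start
    position - prev_min_end
}
fn position_from_distance(prev_min_end: usize, distance: usize) -> usize {
    // decoder side: inverse of the above
    prev_min_end + distance
}
// --- end editor's note ---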
+ } + Ok(()) + } } -fn encode_item_variances(rbml_w: &mut Encoder, - ecx: &EncodeContext, - id: NodeId) { - let v = ecx.tcx.item_variances(ecx.tcx.map.local_def_id(id)); - rbml_w.start_tag(tag_item_variances); - v.encode(rbml_w); - rbml_w.end_tag(); -} +impl<'a, 'tcx> EncodeContext<'a, 'tcx> { + pub fn position(&self) -> usize { + self.opaque.position() + } -fn encode_bounds_and_type_for_item<'a, 'tcx>(rbml_w: &mut Encoder, - ecx: &EncodeContext<'a, 'tcx>, - index: &mut CrateIndex<'a, 'tcx>, - id: NodeId) { - encode_bounds_and_type(rbml_w, - ecx, - index, - &ecx.tcx.lookup_item_type(ecx.tcx.map.local_def_id(id)), - &ecx.tcx.lookup_predicates(ecx.tcx.map.local_def_id(id))); -} + fn emit_node R, R>(&mut self, f: F) -> R { + assert_eq!(self.lazy_state, LazyState::NoNode); + let pos = self.position(); + self.lazy_state = LazyState::NodeStart(pos); + let r = f(self, pos); + self.lazy_state = LazyState::NoNode; + r + } -fn encode_bounds_and_type<'a, 'tcx>(rbml_w: &mut Encoder, - ecx: &EncodeContext<'a, 'tcx>, - index: &mut CrateIndex<'a, 'tcx>, - scheme: &ty::TypeScheme<'tcx>, - predicates: &ty::GenericPredicates<'tcx>) { - encode_generics(rbml_w, ecx, index, - &scheme.generics, &predicates, tag_item_generics); - encode_type(ecx, rbml_w, scheme.ty); -} + fn emit_lazy_distance(&mut self, position: usize, min_size: usize) + -> Result<(), ::Error> { + let min_end = position + min_size; + let distance = match self.lazy_state { + LazyState::NoNode => { + bug!("emit_lazy_distance: outside of a metadata node") + } + LazyState::NodeStart(start) => { + assert!(min_end <= start); + start - min_end + } + LazyState::Previous(last_min_end) => { + assert!(last_min_end <= position); + position - last_min_end + } + }; + self.lazy_state = LazyState::Previous(min_end); + self.emit_usize(distance) + } -fn encode_variant_id(rbml_w: &mut Encoder, vid: DefId) { - let id = def_to_u64(vid); - rbml_w.wr_tagged_u64(tag_items_data_item_variant, id); - rbml_w.wr_tagged_u64(tag_mod_child, id); -} + pub fn lazy(&mut self, value: &T) -> Lazy { + self.emit_node(|ecx, pos| { + value.encode(ecx).unwrap(); -fn write_closure_type<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - closure_type: &ty::ClosureTy<'tcx>) { - tyencode::enc_closure_ty(rbml_w.writer, &ecx.ty_str_ctxt(), closure_type); - rbml_w.mark_stable_position(); -} + assert!(pos + Lazy::::min_size() <= ecx.position()); + Lazy::with_position(pos) + }) + } -fn encode_type<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - typ: Ty<'tcx>) { - rbml_w.start_tag(tag_items_data_item_type); - tyencode::enc_ty(rbml_w.writer, &ecx.ty_str_ctxt(), typ); - rbml_w.mark_stable_position(); - rbml_w.end_tag(); -} + fn lazy_seq(&mut self, iter: I) -> LazySeq + where I: IntoIterator, T: Encodable { + self.emit_node(|ecx, pos| { + let len = iter.into_iter().map(|value| value.encode(ecx).unwrap()).count(); -fn encode_disr_val(_: &EncodeContext, - rbml_w: &mut Encoder, - disr_val: ty::Disr) { - // convert to u64 so just the number is printed, without any type info - rbml_w.wr_tagged_str(tag_disr_val, &disr_val.to_u64_unchecked().to_string()); -} + assert!(pos + LazySeq::::min_size(len) <= ecx.position()); + LazySeq::with_position_and_length(pos, len) + }) + } -fn encode_parent_item(rbml_w: &mut Encoder, id: DefId) { - rbml_w.wr_tagged_u64(tag_items_data_parent_item, def_to_u64(id)); -} + fn lazy_seq_ref<'b, I, T>(&mut self, iter: I) -> LazySeq + where I: IntoIterator, T: 'b + Encodable { + self.emit_node(|ecx, pos| { + let len = iter.into_iter().map(|value| 
value.encode(ecx).unwrap()).count(); -fn encode_struct_fields(rbml_w: &mut Encoder, - variant: ty::VariantDef) { - for f in &variant.fields { - if variant.kind == ty::VariantKind::Tuple { - rbml_w.start_tag(tag_item_unnamed_field); - } else { - rbml_w.start_tag(tag_item_field); - encode_name(rbml_w, f.name); - } - encode_struct_field_family(rbml_w, f.vis); - encode_def_id(rbml_w, f.did); - rbml_w.end_tag(); + assert!(pos + LazySeq::::min_size(len) <= ecx.position()); + LazySeq::with_position_and_length(pos, len) + }) } -} -fn encode_enum_variant_info<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - did: DefId, - vis: &hir::Visibility, - index: &mut CrateIndex<'a, 'tcx>) { - debug!("encode_enum_variant_info(did={:?})", did); - let repr_hints = ecx.tcx.lookup_repr_hints(did); - let repr_type = ecx.tcx.enum_repr_type(repr_hints.get(0)); - let mut disr_val = repr_type.initial_discriminant(ecx.tcx); - let def = ecx.tcx.lookup_adt_def(did); - for variant in &def.variants { - let vid = variant.did; - let variant_node_id = ecx.local_id(vid); - - for field in &variant.fields { - encode_field(ecx, rbml_w, field, index); + /// Encode the given value or a previously cached shorthand. + fn encode_with_shorthand(&mut self, value: &T, variant: &U, map: M) + -> Result<(), ::Error> + where M: for<'b> Fn(&'b mut Self) -> &'b mut FnvHashMap, + T: Clone + Eq + Hash, + U: Encodable { + let existing_shorthand = map(self).get(value).cloned(); + if let Some(shorthand) = existing_shorthand { + return self.emit_usize(shorthand); } - let _task = index.record(vid, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, vid); - encode_family(rbml_w, match variant.kind { - ty::VariantKind::Struct => 'V', - ty::VariantKind::Tuple => 'v', - ty::VariantKind::Unit => 'w', - }); - encode_name(rbml_w, variant.name); - encode_parent_item(rbml_w, did); - encode_visibility(rbml_w, vis); - - let attrs = ecx.tcx.get_attrs(vid); - encode_attributes(rbml_w, &attrs); - encode_repr_attrs(rbml_w, ecx, &attrs); + let start = self.position(); + variant.encode(self)?; + let len = self.position() - start; - let stab = ecx.tcx.lookup_stability(vid); - let depr = ecx.tcx.lookup_deprecation(vid); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); + // The shorthand encoding uses the same usize as the + // discriminant, with an offset so they can't conflict. + let discriminant = unsafe { + intrinsics::discriminant_value(variant) + }; + assert!(discriminant < SHORTHAND_OFFSET as u64); + let shorthand = start + SHORTHAND_OFFSET; - encode_struct_fields(rbml_w, variant); + // Get the number of bits that leb128 could fit + // in the same space as the fully encoded type. + let leb128_bits = len * 7; - let specified_disr_val = variant.disr_val; - if specified_disr_val != disr_val { - encode_disr_val(ecx, rbml_w, specified_disr_val); - disr_val = specified_disr_val; + // Check that the shorthand is a not longer than the + // full encoding itself, i.e. it's an obvious win. + if leb128_bits >= 64 || (shorthand as u64) < (1 << leb128_bits) { + map(self).insert(value.clone(), shorthand); } - encode_bounds_and_type_for_item(rbml_w, ecx, index, variant_node_id); - - rbml_w.end_tag(); - disr_val = disr_val.wrap_incr(); + Ok(()) } -} -/// Iterates through "auxiliary node IDs", which are node IDs that describe -/// top-level items that are sub-items of the given item. Specifically: -/// -/// * For newtype structs, iterates through the node ID of the constructor. 
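// --- Editor's note: illustrative sketch, not part of the patch ---
// `encode_with_shorthand` above only caches a shorthand when reusing it is an
// obvious win: the shorthand's LEB128 encoding must be no longer than the full
// encoding it replaces (`len` bytes hold `len * 7` bits of LEB128 payload).
// A standalone restatement of that check:
fn shorthand_is_worth_caching(shorthand: usize, full_encoding_len: usize) -> bool {
    let leb128_bits = full_encoding_len * 7;
    // if the full encoding already spans >= 64 payload bits, any usize shorthand
    // fits; otherwise the shorthand must fit within the same bit budget
    leb128_bits >= 64 || (shorthand as u64) < (1u64 << leb128_bits)
}
// --- end editor's note ---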
-fn each_auxiliary_node_id(item: &hir::Item, callback: F) -> bool where - F: FnOnce(NodeId) -> bool, -{ - let mut continue_ = true; - match item.node { - hir::ItemStruct(ref struct_def, _) => { - // If this is a newtype struct, return the constructor. - if struct_def.is_tuple() { - continue_ = callback(struct_def.id()); - } - } - _ => {} + /// For every DefId that we create a metadata item for, we include a + /// serialized copy of its DefKey, which allows us to recreate a path. + fn encode_def_key(&mut self, def_id: DefId) -> Lazy { + let tcx = self.tcx; + self.lazy(&tcx.map.def_key(def_id)) } - continue_ -} - -fn encode_reexports(ecx: &EncodeContext, - rbml_w: &mut Encoder, - id: NodeId) { - debug!("(encoding info for module) encoding reexports for {}", id); - match ecx.reexports.get(&id) { - Some(exports) => { - debug!("(encoding info for module) found reexports for {}", id); - for exp in exports { - debug!("(encoding info for module) reexport '{}' ({:?}) for \ - {}", - exp.name, - exp.def_id, - id); - rbml_w.start_tag(tag_items_data_item_reexport); - rbml_w.wr_tagged_u64(tag_items_data_item_reexport_def_id, - def_to_u64(exp.def_id)); - rbml_w.wr_tagged_str(tag_items_data_item_reexport_name, - &exp.name.as_str()); - rbml_w.end_tag(); - } - }, - None => debug!("(encoding info for module) found no reexports for {}", id), + fn encode_item_variances(&mut self, def_id: DefId) -> LazySeq { + let tcx = self.tcx; + self.lazy_seq(tcx.item_variances(def_id).iter().cloned()) } -} -fn encode_info_for_mod(ecx: &EncodeContext, - rbml_w: &mut Encoder, - md: &hir::Mod, - attrs: &[ast::Attribute], - id: NodeId, - name: Name, - vis: &hir::Visibility) { - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, ecx.tcx.map.local_def_id(id)); - encode_family(rbml_w, 'm'); - encode_name(rbml_w, name); - debug!("(encoding info for module) encoding info for module ID {}", id); - - // Encode info about all the module children. - for item_id in &md.item_ids { - rbml_w.wr_tagged_u64(tag_mod_child, - def_to_u64(ecx.tcx.map.local_def_id(item_id.id))); - - let item = ecx.tcx.map.expect_item(item_id.id); - each_auxiliary_node_id(item, |auxiliary_node_id| { - rbml_w.wr_tagged_u64(tag_mod_child, - def_to_u64(ecx.tcx.map.local_def_id(auxiliary_node_id))); - true - }); + fn encode_item_type(&mut self, def_id: DefId) -> Lazy> { + let tcx = self.tcx; + self.lazy(&tcx.lookup_item_type(def_id).ty) } - encode_visibility(rbml_w, vis); - - let stab = ecx.tcx.lookup_stability(ecx.tcx.map.local_def_id(id)); - let depr = ecx.tcx.lookup_deprecation(ecx.tcx.map.local_def_id(id)); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); + /// Encode data for the given variant of the given ADT. The + /// index of the variant is untracked: this is ok because we + /// will have to lookup the adt-def by its id, and that gives us + /// the right to access any information in the adt-def (including, + /// e.g., the length of the various vectors). + fn encode_enum_variant_info(&mut self, + (enum_did, Untracked(index)): + (DefId, Untracked)) -> Entry<'tcx> { + let tcx = self.tcx; + let def = tcx.lookup_adt_def(enum_did); + let variant = &def.variants[index]; + let def_id = variant.did; + + let data = VariantData { + kind: variant.kind, + disr: variant.disr_val.to_u64_unchecked(), + struct_ctor: None + }; - // Encode the reexports of this module, if this module is public. 
- if *vis == hir::Public { - debug!("(encoding info for module) encoding reexports for {}", id); - encode_reexports(ecx, rbml_w, id); + let enum_id = tcx.map.as_local_node_id(enum_did).unwrap(); + let enum_vis = &tcx.map.expect_item(enum_id).vis; + + Entry { + kind: EntryKind::Variant(self.lazy(&data)), + visibility: enum_vis.simplify(), + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(&tcx.get_attrs(def_id)), + children: self.lazy_seq(variant.fields.iter().map(|f| { + assert!(f.did.is_local()); + f.did.index + })), + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: Some(self.encode_item_type(def_id)), + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: Some(self.encode_generics(def_id)), + predicates: Some(self.encode_predicates(def_id)), + + ast: None, + mir: None + } } - encode_attributes(rbml_w, attrs); - - rbml_w.end_tag(); -} -fn encode_struct_field_family(rbml_w: &mut Encoder, - visibility: ty::Visibility) { - encode_family(rbml_w, if visibility.is_public() { 'g' } else { 'N' }); -} + fn encode_info_for_mod(&mut self, + FromId(id, (md, attrs, vis)): + FromId<(&hir::Mod, &[ast::Attribute], &hir::Visibility)>) + -> Entry<'tcx> { + let tcx = self.tcx; + let def_id = tcx.map.local_def_id(id); + + let data = ModData { + reexports: match self.reexports.get(&id) { + Some(exports) if *vis == hir::Public => { + self.lazy_seq_ref(exports) + } + _ => LazySeq::empty() + } + }; -fn encode_visibility(rbml_w: &mut Encoder, visibility: T) { - let ch = if visibility.is_public() { 'y' } else { 'i' }; - rbml_w.wr_tagged_u8(tag_items_data_item_visibility, ch as u8); + Entry { + kind: EntryKind::Mod(self.lazy(&data)), + visibility: vis.simplify(), + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(attrs), + children: self.lazy_seq(md.item_ids.iter().map(|item_id| { + tcx.map.local_def_id(item_id.id).index + })), + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: None, + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: None, + predicates: None, + + ast: None, + mir: None + } + } } -trait HasVisibility: Sized { - fn is_public(self) -> bool; +trait Visibility { + fn simplify(&self) -> ty::Visibility; } -impl<'a> HasVisibility for &'a hir::Visibility { - fn is_public(self) -> bool { - *self == hir::Public +impl Visibility for hir::Visibility { + fn simplify(&self) -> ty::Visibility { + if *self == hir::Public { + ty::Visibility::Public + } else { + ty::Visibility::PrivateExternal + } } } -impl HasVisibility for ty::Visibility { - fn is_public(self) -> bool { - self == ty::Visibility::Public +impl Visibility for ty::Visibility { + fn simplify(&self) -> ty::Visibility { + if *self == ty::Visibility::Public { + ty::Visibility::Public + } else { + ty::Visibility::PrivateExternal + } } } -fn encode_constness(rbml_w: &mut Encoder, constness: hir::Constness) { - rbml_w.start_tag(tag_items_data_item_constness); - let ch = match constness { - hir::Constness::Const => 'c', - hir::Constness::NotConst => 'n', - }; - rbml_w.wr_str(&ch.to_string()); - rbml_w.end_tag(); -} - -fn encode_defaultness(rbml_w: &mut Encoder, defaultness: hir::Defaultness) { - let ch = match defaultness { - hir::Defaultness::Default => 'd', - hir::Defaultness::Final => 'f', - }; - rbml_w.wr_tagged_u8(tag_items_data_item_defaultness, ch as u8); -} - -fn encode_explicit_self(rbml_w: &mut Encoder, - explicit_self: &ty::ExplicitSelfCategory) { - 
let tag = tag_item_trait_method_explicit_self; - - // Encode the base self type. - match *explicit_self { - ty::ExplicitSelfCategory::Static => { - rbml_w.wr_tagged_bytes(tag, &['s' as u8]); - } - ty::ExplicitSelfCategory::ByValue => { - rbml_w.wr_tagged_bytes(tag, &['v' as u8]); - } - ty::ExplicitSelfCategory::ByBox => { - rbml_w.wr_tagged_bytes(tag, &['~' as u8]); - } - ty::ExplicitSelfCategory::ByReference(_, m) => { - // FIXME(#4846) encode custom lifetime - let ch = encode_mutability(m); - rbml_w.wr_tagged_bytes(tag, &['&' as u8, ch]); +impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { + fn encode_fields(&mut self, + adt_def_id: DefId) { + let def = self.tcx.lookup_adt_def(adt_def_id); + for (variant_index, variant) in def.variants.iter().enumerate() { + for (field_index, field) in variant.fields.iter().enumerate() { + self.record(field.did, + EncodeContext::encode_field, + (adt_def_id, Untracked((variant_index, field_index)))); + } } } +} - fn encode_mutability(m: hir::Mutability) -> u8 { - match m { - hir::MutImmutable => 'i' as u8, - hir::MutMutable => 'm' as u8, +impl<'a, 'tcx> EncodeContext<'a, 'tcx> { + /// Encode data for the given field of the given variant of the + /// given ADT. The indices of the variant/field are untracked: + /// this is ok because we will have to lookup the adt-def by its + /// id, and that gives us the right to access any information in + /// the adt-def (including, e.g., the length of the various + /// vectors). + fn encode_field(&mut self, + (adt_def_id, Untracked((variant_index, field_index))): + (DefId, Untracked<(usize, usize)>)) -> Entry<'tcx> { + let tcx = self.tcx; + let variant = &tcx.lookup_adt_def(adt_def_id).variants[variant_index]; + let field = &variant.fields[field_index]; + + let def_id = field.did; + let variant_id = tcx.map.as_local_node_id(variant.did).unwrap(); + let variant_data = tcx.map.expect_variant_data(variant_id); + + Entry { + kind: EntryKind::Field, + visibility: field.vis.simplify(), + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(&variant_data.fields()[field_index].attrs), + children: LazySeq::empty(), + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: Some(self.encode_item_type(def_id)), + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: Some(self.encode_generics(def_id)), + predicates: Some(self.encode_predicates(def_id)), + + ast: None, + mir: None } } -} -fn encode_item_sort(rbml_w: &mut Encoder, sort: char) { - rbml_w.wr_tagged_u8(tag_item_trait_item_sort, sort as u8); -} + fn encode_struct_ctor(&mut self, (adt_def_id, def_id): (DefId, DefId)) + -> Entry<'tcx> { + let variant = self.tcx.lookup_adt_def(adt_def_id).struct_variant(); -fn encode_field<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - field: ty::FieldDef<'tcx>, - index: &mut CrateIndex<'a, 'tcx>) { - let nm = field.name; - let id = ecx.local_id(field.did); - - let _task = index.record(field.did, rbml_w); - rbml_w.start_tag(tag_items_data_item); - debug!("encode_field: encoding {} {}", nm, id); - encode_struct_field_family(rbml_w, field.vis); - encode_name(rbml_w, nm); - encode_bounds_and_type_for_item(rbml_w, ecx, index, id); - encode_def_id_and_key(ecx, rbml_w, field.did); - - let stab = ecx.tcx.lookup_stability(field.did); - let depr = ecx.tcx.lookup_deprecation(field.did); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - - rbml_w.end_tag(); -} + let data = VariantData { + kind: variant.kind, + disr: 
variant.disr_val.to_u64_unchecked(), + struct_ctor: Some(def_id.index) + }; -fn encode_info_for_struct_ctor<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - name: Name, - struct_def: &hir::VariantData, - index: &mut CrateIndex<'a, 'tcx>, - struct_id: NodeId) { - let ctor_id = struct_def.id(); - let ctor_def_id = ecx.tcx.map.local_def_id(ctor_id); - - let _task = index.record(ctor_def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, ctor_def_id); - encode_family(rbml_w, match *struct_def { - hir::VariantData::Struct(..) => 'S', - hir::VariantData::Tuple(..) => 's', - hir::VariantData::Unit(..) => 'u', - }); - encode_bounds_and_type_for_item(rbml_w, ecx, index, ctor_id); - encode_name(rbml_w, name); - encode_parent_item(rbml_w, ecx.tcx.map.local_def_id(struct_id)); - - let stab = ecx.tcx.lookup_stability(ecx.tcx.map.local_def_id(ctor_id)); - let depr= ecx.tcx.lookup_deprecation(ecx.tcx.map.local_def_id(ctor_id)); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - - // indicate that this is a tuple struct ctor, because downstream users will normally want - // the tuple struct definition, but without this there is no way for them to tell that - // they actually have a ctor rather than a normal function - rbml_w.wr_tagged_bytes(tag_items_data_item_is_tuple_struct_ctor, &[]); - - rbml_w.end_tag(); -} + Entry { + kind: EntryKind::Struct(self.lazy(&data)), + visibility: ty::Visibility::Public, + def_key: self.encode_def_key(def_id), + attributes: LazySeq::empty(), + children: LazySeq::empty(), + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: Some(self.encode_item_type(def_id)), + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: Some(self.encode_generics(def_id)), + predicates: Some(self.encode_predicates(def_id)), + + ast: None, + mir: None + } + } -fn encode_generics<'a, 'tcx>(rbml_w: &mut Encoder, - ecx: &EncodeContext<'a, 'tcx>, - index: &mut CrateIndex<'a, 'tcx>, - generics: &ty::Generics<'tcx>, - predicates: &ty::GenericPredicates<'tcx>, - tag: usize) -{ - rbml_w.start_tag(tag); - - for param in &generics.types { - rbml_w.start_tag(tag_type_param_def); - tyencode::enc_type_param_def(rbml_w.writer, &ecx.ty_str_ctxt(), param); - rbml_w.mark_stable_position(); - rbml_w.end_tag(); + fn encode_generics(&mut self, def_id: DefId) -> Lazy> { + let tcx = self.tcx; + self.lazy(tcx.lookup_generics(def_id)) } - // Region parameters - for param in &generics.regions { - rbml_w.start_tag(tag_region_param_def); - tyencode::enc_region_param_def(rbml_w.writer, &ecx.ty_str_ctxt(), param); - rbml_w.mark_stable_position(); - rbml_w.end_tag(); + fn encode_predicates(&mut self, def_id: DefId) -> Lazy> { + let tcx = self.tcx; + self.lazy(&tcx.lookup_predicates(def_id)) } - encode_predicates_in_current_doc(rbml_w, ecx, index, predicates); + fn encode_info_for_trait_item(&mut self, def_id: DefId) -> Entry<'tcx> { + let tcx = self.tcx; - rbml_w.end_tag(); -} + let node_id = tcx.map.as_local_node_id(def_id).unwrap(); + let ast_item = tcx.map.expect_trait_item(node_id); + let trait_item = tcx.impl_or_trait_item(def_id); -fn encode_predicates_in_current_doc<'a,'tcx>(rbml_w: &mut Encoder, - _ecx: &EncodeContext<'a,'tcx>, - index: &mut CrateIndex<'a, 'tcx>, - predicates: &ty::GenericPredicates<'tcx>) -{ - for (space, _, predicate) in predicates.predicates.iter_enumerated() { - let tag = match space { - subst::TypeSpace => tag_type_predicate, - subst::SelfSpace => 
tag_self_predicate, - subst::FnSpace => tag_fn_predicate + let container = |has_body| if has_body { + AssociatedContainer::TraitWithDefault + } else { + AssociatedContainer::TraitRequired }; - rbml_w.wr_tagged_u32(tag, - index.add_xref(XRef::Predicate(predicate.clone()))); - } -} - -fn encode_predicates<'a,'tcx>(rbml_w: &mut Encoder, - ecx: &EncodeContext<'a,'tcx>, - index: &mut CrateIndex<'a, 'tcx>, - predicates: &ty::GenericPredicates<'tcx>, - tag: usize) -{ - rbml_w.start_tag(tag); - encode_predicates_in_current_doc(rbml_w, ecx, index, predicates); - rbml_w.end_tag(); -} + let kind = match trait_item { + ty::ConstTraitItem(ref associated_const) => { + EntryKind::AssociatedConst(container(associated_const.has_value)) + } + ty::MethodTraitItem(ref method_ty) => { + let fn_data = if let hir::MethodTraitItem(ref sig, _) = ast_item.node { + FnData { + constness: hir::Constness::NotConst, + arg_names: self.encode_fn_arg_names(&sig.decl) + } + } else { + bug!() + }; + let data = MethodData { + fn_data: fn_data, + container: container(method_ty.has_body), + explicit_self: self.lazy(&method_ty.explicit_self) + }; + EntryKind::Method(self.lazy(&data)) + } + ty::TypeTraitItem(_) => { + EntryKind::AssociatedType(container(false)) + } + }; -fn encode_method_ty_fields<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - index: &mut CrateIndex<'a, 'tcx>, - method_ty: &ty::Method<'tcx>) { - encode_def_id_and_key(ecx, rbml_w, method_ty.def_id); - encode_name(rbml_w, method_ty.name); - encode_generics(rbml_w, ecx, index, - &method_ty.generics, &method_ty.predicates, - tag_method_ty_generics); - encode_visibility(rbml_w, method_ty.vis); - encode_explicit_self(rbml_w, &method_ty.explicit_self); - match method_ty.explicit_self { - ty::ExplicitSelfCategory::Static => { - encode_family(rbml_w, STATIC_METHOD_FAMILY); + Entry { + kind: kind, + visibility: trait_item.vis().simplify(), + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(&ast_item.attrs), + children: LazySeq::empty(), + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: match trait_item { + ty::ConstTraitItem(_) | + ty::MethodTraitItem(_) => { + Some(self.encode_item_type(def_id)) + } + ty::TypeTraitItem(ref associated_type) => { + associated_type.ty.map(|ty| self.lazy(&ty)) + } + }, + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: Some(self.encode_generics(def_id)), + predicates: Some(self.encode_predicates(def_id)), + + ast: if let ty::ConstTraitItem(_) = trait_item { + let trait_def_id = trait_item.container().id(); + Some(self.encode_inlined_item(InlinedItemRef::TraitItem(trait_def_id, ast_item))) + } else { + None + }, + mir: self.encode_mir(def_id) } - _ => encode_family(rbml_w, METHOD_FAMILY) } -} -fn encode_info_for_associated_const<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - index: &mut CrateIndex<'a, 'tcx>, - associated_const: &ty::AssociatedConst, - parent_id: NodeId, - impl_item_opt: Option<&hir::ImplItem>) { - debug!("encode_info_for_associated_const({:?},{:?})", - associated_const.def_id, - associated_const.name); - - let _task = index.record(associated_const.def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - - encode_def_id_and_key(ecx, rbml_w, associated_const.def_id); - encode_name(rbml_w, associated_const.name); - encode_visibility(rbml_w, associated_const.vis); - encode_family(rbml_w, 'C'); - - encode_parent_item(rbml_w, ecx.tcx.map.local_def_id(parent_id)); - 
encode_item_sort(rbml_w, 'C'); - - encode_bounds_and_type_for_item(rbml_w, ecx, index, - ecx.local_id(associated_const.def_id)); - - let stab = ecx.tcx.lookup_stability(associated_const.def_id); - let depr = ecx.tcx.lookup_deprecation(associated_const.def_id); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - - if let Some(ii) = impl_item_opt { - encode_attributes(rbml_w, &ii.attrs); - encode_defaultness(rbml_w, ii.defaultness); - encode_inlined_item(ecx, - rbml_w, - InlinedItemRef::ImplItem(ecx.tcx.map.local_def_id(parent_id), - ii)); - encode_mir(ecx, rbml_w, ii.id); - } + fn encode_info_for_impl_item(&mut self, def_id: DefId) -> Entry<'tcx> { + let node_id = self.tcx.map.as_local_node_id(def_id).unwrap(); + let ast_item = self.tcx.map.expect_impl_item(node_id); + let impl_item = self.tcx.impl_or_trait_item(def_id); + let impl_def_id = impl_item.container().id(); - rbml_w.end_tag(); -} + let container = match ast_item.defaultness { + hir::Defaultness::Default => AssociatedContainer::ImplDefault, + hir::Defaultness::Final => AssociatedContainer::ImplFinal + }; -fn encode_info_for_method<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - index: &mut CrateIndex<'a, 'tcx>, - m: &ty::Method<'tcx>, - is_default_impl: bool, - parent_id: NodeId, - impl_item_opt: Option<&hir::ImplItem>) { - - debug!("encode_info_for_method: {:?} {:?}", m.def_id, - m.name); - let _task = index.record(m.def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - - encode_method_ty_fields(ecx, rbml_w, index, m); - encode_parent_item(rbml_w, ecx.tcx.map.local_def_id(parent_id)); - encode_item_sort(rbml_w, 'r'); - - let stab = ecx.tcx.lookup_stability(m.def_id); - let depr = ecx.tcx.lookup_deprecation(m.def_id); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - - let m_node_id = ecx.local_id(m.def_id); - encode_bounds_and_type_for_item(rbml_w, ecx, index, m_node_id); - - if let Some(impl_item) = impl_item_opt { - if let hir::ImplItemKind::Method(ref sig, _) = impl_item.node { - encode_attributes(rbml_w, &impl_item.attrs); - let scheme = ecx.tcx.lookup_item_type(m.def_id); - let any_types = !scheme.generics.types.is_empty(); - let needs_inline = any_types || is_default_impl || - attr::requests_inline(&impl_item.attrs); - if needs_inline || sig.constness == hir::Constness::Const { - encode_inlined_item(ecx, - rbml_w, - InlinedItemRef::ImplItem(ecx.tcx.map.local_def_id(parent_id), - impl_item)); - encode_mir(ecx, rbml_w, impl_item.id); + let kind = match impl_item { + ty::ConstTraitItem(_) => { + EntryKind::AssociatedConst(container) } - encode_constness(rbml_w, sig.constness); - encode_defaultness(rbml_w, impl_item.defaultness); - encode_method_argument_names(rbml_w, &sig.decl); - } - } - - rbml_w.end_tag(); -} - -fn encode_info_for_associated_type<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - index: &mut CrateIndex<'a, 'tcx>, - associated_type: &ty::AssociatedType<'tcx>, - parent_id: NodeId, - impl_item_opt: Option<&hir::ImplItem>) { - debug!("encode_info_for_associated_type({:?},{:?})", - associated_type.def_id, - associated_type.name); - - let _task = index.record(associated_type.def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - - encode_def_id_and_key(ecx, rbml_w, associated_type.def_id); - encode_name(rbml_w, associated_type.name); - encode_visibility(rbml_w, associated_type.vis); - encode_family(rbml_w, 'y'); - encode_parent_item(rbml_w, ecx.tcx.map.local_def_id(parent_id)); - encode_item_sort(rbml_w, 't'); - - let stab = 
ecx.tcx.lookup_stability(associated_type.def_id); - let depr = ecx.tcx.lookup_deprecation(associated_type.def_id); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - - if let Some(ii) = impl_item_opt { - encode_attributes(rbml_w, &ii.attrs); - encode_defaultness(rbml_w, ii.defaultness); - } else { - encode_predicates(rbml_w, ecx, index, - &ecx.tcx.lookup_predicates(associated_type.def_id), - tag_item_generics); - } - - if let Some(ty) = associated_type.ty { - encode_type(ecx, rbml_w, ty); - } - - rbml_w.end_tag(); -} + ty::MethodTraitItem(ref method_ty) => { + let fn_data = if let hir::ImplItemKind::Method(ref sig, _) = ast_item.node { + FnData { + constness: sig.constness, + arg_names: self.encode_fn_arg_names(&sig.decl) + } + } else { + bug!() + }; + let data = MethodData { + fn_data: fn_data, + container: container, + explicit_self: self.lazy(&method_ty.explicit_self) + }; + EntryKind::Method(self.lazy(&data)) + } + ty::TypeTraitItem(_) => { + EntryKind::AssociatedType(container) + } + }; -fn encode_method_argument_names(rbml_w: &mut Encoder, - decl: &hir::FnDecl) { - rbml_w.start_tag(tag_method_argument_names); - for arg in &decl.inputs { - let tag = tag_method_argument_name; - if let PatKind::Binding(_, ref path1, _) = arg.pat.node { - let name = path1.node.as_str(); - rbml_w.wr_tagged_bytes(tag, name.as_bytes()); + let (ast, mir) = if let ty::ConstTraitItem(_) = impl_item { + (true, true) + } else if let hir::ImplItemKind::Method(ref sig, _) = ast_item.node { + let generics = self.tcx.lookup_generics(def_id); + let types = generics.parent_types as usize + generics.types.len(); + let needs_inline = types > 0 || attr::requests_inline(&ast_item.attrs); + let is_const_fn = sig.constness == hir::Constness::Const; + (is_const_fn, needs_inline || is_const_fn) } else { - rbml_w.wr_tagged_bytes(tag, &[]); + (false, false) + }; + + Entry { + kind: kind, + visibility: impl_item.vis().simplify(), + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(&ast_item.attrs), + children: LazySeq::empty(), + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: match impl_item { + ty::ConstTraitItem(_) | + ty::MethodTraitItem(_) => { + Some(self.encode_item_type(def_id)) + } + ty::TypeTraitItem(ref associated_type) => { + associated_type.ty.map(|ty| self.lazy(&ty)) + } + }, + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: Some(self.encode_generics(def_id)), + predicates: Some(self.encode_predicates(def_id)), + + ast: if ast { + Some(self.encode_inlined_item(InlinedItemRef::ImplItem(impl_def_id, ast_item))) + } else { + None + }, + mir: if mir { + self.encode_mir(def_id) + } else { + None + } } } - rbml_w.end_tag(); -} -fn encode_repr_attrs(rbml_w: &mut Encoder, - ecx: &EncodeContext, - attrs: &[ast::Attribute]) { - let mut repr_attrs = Vec::new(); - for attr in attrs { - repr_attrs.extend(attr::find_repr_attrs(ecx.tcx.sess.diagnostic(), - attr)); + fn encode_fn_arg_names(&mut self, decl: &hir::FnDecl) -> LazySeq { + self.lazy_seq(decl.inputs.iter().map(|arg| { + if let PatKind::Binding(_, ref path1, _) = arg.pat.node { + path1.node + } else { + syntax::parse::token::intern("") + } + })) } - rbml_w.start_tag(tag_items_data_item_repr); - repr_attrs.encode(rbml_w); - rbml_w.end_tag(); -} -fn encode_mir(ecx: &EncodeContext, rbml_w: &mut Encoder, node_id: NodeId) { - let def_id = ecx.tcx.map.local_def_id(node_id); - if let Some(mir) = ecx.mir_map.map.get(&def_id) { - rbml_w.start_tag(tag_mir as 
usize); - rbml_w.emit_opaque(|opaque_encoder| { - tls::enter_encoding_context(ecx, opaque_encoder, |_, opaque_encoder| { - Encodable::encode(mir, opaque_encoder) - }) - }).unwrap(); - rbml_w.end_tag(); + fn encode_mir(&mut self, def_id: DefId) -> Option>> { + self.mir_map.map.get(&def_id).map(|mir| self.lazy(mir)) } -} -const FN_FAMILY: char = 'f'; -const STATIC_METHOD_FAMILY: char = 'F'; -const METHOD_FAMILY: char = 'h'; - -// Encodes the inherent implementations of a structure, enumeration, or trait. -fn encode_inherent_implementations(ecx: &EncodeContext, - rbml_w: &mut Encoder, - def_id: DefId) { - match ecx.tcx.inherent_impls.borrow().get(&def_id) { - None => {} - Some(implementations) => { - for &impl_def_id in implementations.iter() { - rbml_w.start_tag(tag_items_data_item_inherent_impl); - encode_def_id(rbml_w, impl_def_id); - rbml_w.end_tag(); + // Encodes the inherent implementations of a structure, enumeration, or trait. + fn encode_inherent_implementations(&mut self, def_id: DefId) -> LazySeq { + match self.tcx.inherent_impls.borrow().get(&def_id) { + None => LazySeq::empty(), + Some(implementations) => { + self.lazy_seq(implementations.iter().map(|&def_id| { + assert!(def_id.is_local()); + def_id.index + })) } } } -} -fn encode_stability(rbml_w: &mut Encoder, stab_opt: Option<&attr::Stability>) { - stab_opt.map(|stab| { - rbml_w.start_tag(tag_items_data_item_stability); - stab.encode(rbml_w).unwrap(); - rbml_w.end_tag(); - }); -} + fn encode_stability(&mut self, def_id: DefId) -> Option> { + self.tcx.lookup_stability(def_id).map(|stab| self.lazy(stab)) + } -fn encode_deprecation(rbml_w: &mut Encoder, depr_opt: Option) { - depr_opt.map(|depr| { - rbml_w.start_tag(tag_items_data_item_deprecation); - depr.encode(rbml_w).unwrap(); - rbml_w.end_tag(); - }); -} + fn encode_deprecation(&mut self, def_id: DefId) -> Option> { + self.tcx.lookup_deprecation(def_id).map(|depr| self.lazy(&depr)) + } -fn encode_parent_impl(rbml_w: &mut Encoder, parent_opt: Option) { - parent_opt.map(|parent| { - rbml_w.wr_tagged_u64(tag_items_data_parent_impl, def_to_u64(parent)); - }); -} + fn encode_info_for_item(&mut self, + (def_id, item): (DefId, &hir::Item)) -> Entry<'tcx> { + let tcx = self.tcx; + + debug!("encoding info for item at {}", + tcx.sess.codemap().span_to_string(item.span)); + + let kind = match item.node { + hir::ItemStatic(_, hir::MutMutable, _) => EntryKind::MutStatic, + hir::ItemStatic(_, hir::MutImmutable, _) => EntryKind::ImmStatic, + hir::ItemConst(..) => EntryKind::Const, + hir::ItemFn(ref decl, _, constness, ..) => { + let data = FnData { + constness: constness, + arg_names: self.encode_fn_arg_names(&decl) + }; -fn encode_xrefs<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - xrefs: FnvHashMap, u32>) -{ - let mut xref_positions = vec![0; xrefs.len()]; - rbml_w.start_tag(tag_xref_data); - for (xref, id) in xrefs.into_iter() { - xref_positions[id as usize] = rbml_w.mark_stable_position() as u32; - match xref { - XRef::Predicate(p) => { - tyencode::enc_predicate(rbml_w.writer, &ecx.ty_str_ctxt(), &p) + EntryKind::Fn(self.lazy(&data)) } - } - } - rbml_w.mark_stable_position(); - rbml_w.end_tag(); - - rbml_w.start_tag(tag_xref_index); - index::write_dense_index(xref_positions, rbml_w.writer); - rbml_w.end_tag(); -} + hir::ItemMod(ref m) => { + return self.encode_info_for_mod(FromId(item.id, (m, &item.attrs, &item.vis))); + } + hir::ItemForeignMod(_) => EntryKind::ForeignMod, + hir::ItemTy(..) => EntryKind::Type, + hir::ItemEnum(..) 
=> EntryKind::Enum, + hir::ItemStruct(ref struct_def, _) => { + let variant = tcx.lookup_adt_def(def_id).struct_variant(); + + /* Encode def_ids for each field and method + for methods, write all the stuff get_trait_method + needs to know*/ + let struct_ctor = if !struct_def.is_struct() { + Some(tcx.map.local_def_id(struct_def.id()).index) + } else { + None + }; + EntryKind::Struct(self.lazy(&VariantData { + kind: variant.kind, + disr: variant.disr_val.to_u64_unchecked(), + struct_ctor: struct_ctor + })) + } + hir::ItemUnion(..) => { + let variant = tcx.lookup_adt_def(def_id).struct_variant(); + + EntryKind::Union(self.lazy(&VariantData { + kind: variant.kind, + disr: variant.disr_val.to_u64_unchecked(), + struct_ctor: None + })) + } + hir::ItemDefaultImpl(..) => { + let data = ImplData { + polarity: hir::ImplPolarity::Positive, + parent_impl: None, + coerce_unsized_kind: None, + trait_ref: tcx.impl_trait_ref(def_id).map(|trait_ref| self.lazy(&trait_ref)) + }; -fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - item: &hir::Item, - index: &mut CrateIndex<'a, 'tcx>) { - let tcx = ecx.tcx; - - debug!("encoding info for item at {}", - tcx.sess.codemap().span_to_string(item.span)); - - let vis = &item.vis; - let def_id = ecx.tcx.map.local_def_id(item.id); - - let (stab, depr) = tcx.dep_graph.with_task(DepNode::MetaData(def_id), || { - (tcx.lookup_stability(ecx.tcx.map.local_def_id(item.id)), - tcx.lookup_deprecation(ecx.tcx.map.local_def_id(item.id))) - }); - - match item.node { - hir::ItemStatic(_, m, _) => { - let _task = index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - if m == hir::MutMutable { - encode_family(rbml_w, 'b'); - } else { - encode_family(rbml_w, 'c'); - } - encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id); - encode_name(rbml_w, item.name); - encode_visibility(rbml_w, vis); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - encode_attributes(rbml_w, &item.attrs); - rbml_w.end_tag(); - } - hir::ItemConst(_, _) => { - let _task = index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, 'C'); - encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id); - encode_name(rbml_w, item.name); - encode_attributes(rbml_w, &item.attrs); - encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(def_id, item)); - encode_mir(ecx, rbml_w, item.id); - encode_visibility(rbml_w, vis); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - rbml_w.end_tag(); - } - hir::ItemFn(ref decl, _, constness, _, ref generics, _) => { - let _task = index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, FN_FAMILY); - let tps_len = generics.ty_params.len(); - encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id); - encode_name(rbml_w, item.name); - encode_attributes(rbml_w, &item.attrs); - let needs_inline = tps_len > 0 || attr::requests_inline(&item.attrs); - if needs_inline || constness == hir::Constness::Const { - encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(def_id, item)); - encode_mir(ecx, rbml_w, item.id); - } - encode_constness(rbml_w, constness); - encode_visibility(rbml_w, vis); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - encode_method_argument_names(rbml_w, &decl); - rbml_w.end_tag(); - } - hir::ItemMod(ref m) => { - let _task = 
index.record(def_id, rbml_w); - encode_info_for_mod(ecx, - rbml_w, - m, - &item.attrs, - item.id, - item.name, - &item.vis); - } - hir::ItemForeignMod(ref fm) => { - let _task = index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, 'n'); - encode_name(rbml_w, item.name); - - // Encode all the items in this module. - for foreign_item in &fm.items { - rbml_w.wr_tagged_u64(tag_mod_child, - def_to_u64(ecx.tcx.map.local_def_id(foreign_item.id))); - } - encode_visibility(rbml_w, vis); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - rbml_w.end_tag(); - } - hir::ItemTy(..) => { - let _task = index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, 'y'); - encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id); - encode_name(rbml_w, item.name); - encode_visibility(rbml_w, vis); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - rbml_w.end_tag(); - } - hir::ItemEnum(ref enum_definition, _) => { - let _task = index.record(def_id, rbml_w); - - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, 't'); - encode_item_variances(rbml_w, ecx, item.id); - encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id); - encode_name(rbml_w, item.name); - encode_attributes(rbml_w, &item.attrs); - encode_repr_attrs(rbml_w, ecx, &item.attrs); - for v in &enum_definition.variants { - encode_variant_id(rbml_w, ecx.tcx.map.local_def_id(v.node.data.id())); - } - encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(def_id, item)); - encode_mir(ecx, rbml_w, item.id); - - // Encode inherent implementations for this enumeration. - encode_inherent_implementations(ecx, rbml_w, def_id); - - encode_visibility(rbml_w, vis); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - rbml_w.end_tag(); - - encode_enum_variant_info(ecx, - rbml_w, - def_id, - vis, - index); - } - hir::ItemStruct(ref struct_def, _) => { - /* Index the class*/ - let _task = index.record(def_id, rbml_w); - - let def = ecx.tcx.lookup_adt_def(def_id); - let variant = def.struct_variant(); - - /* Now, make an item for the class itself */ - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, match *struct_def { - hir::VariantData::Struct(..) => 'S', - hir::VariantData::Tuple(..) => 's', - hir::VariantData::Unit(..) => 'u', - }); - encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id); - - encode_item_variances(rbml_w, ecx, item.id); - encode_name(rbml_w, item.name); - encode_attributes(rbml_w, &item.attrs); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - encode_visibility(rbml_w, vis); - encode_repr_attrs(rbml_w, ecx, &item.attrs); - - /* Encode def_ids for each field and method - for methods, write all the stuff get_trait_method - needs to know*/ - encode_struct_fields(rbml_w, variant); - - encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(def_id, item)); - encode_mir(ecx, rbml_w, item.id); - - // Encode inherent implementations for this structure. - encode_inherent_implementations(ecx, rbml_w, def_id); - - if !struct_def.is_struct() { - let ctor_did = ecx.tcx.map.local_def_id(struct_def.id()); - rbml_w.wr_tagged_u64(tag_items_data_item_struct_ctor, - def_to_u64(ctor_did)); - } + EntryKind::DefaultImpl(self.lazy(&data)) + } + hir::ItemImpl(_, polarity, ..) 
=> { + let trait_ref = tcx.impl_trait_ref(def_id); + let parent = if let Some(trait_ref) = trait_ref { + let trait_def = tcx.lookup_trait_def(trait_ref.def_id); + trait_def.ancestors(def_id).skip(1).next().and_then(|node| { + match node { + specialization_graph::Node::Impl(parent) => Some(parent), + _ => None, + } + }) + } else { + None + }; - rbml_w.end_tag(); + let data = ImplData { + polarity: polarity, + parent_impl: parent, + coerce_unsized_kind: tcx.custom_coerce_unsized_kinds.borrow() + .get(&def_id).cloned(), + trait_ref: trait_ref.map(|trait_ref| self.lazy(&trait_ref)) + }; - for field in &variant.fields { - encode_field(ecx, rbml_w, field, index); - } + EntryKind::Impl(self.lazy(&data)) + } + hir::ItemTrait(..) => { + let trait_def = tcx.lookup_trait_def(def_id); + let data = TraitData { + unsafety: trait_def.unsafety, + paren_sugar: trait_def.paren_sugar, + has_default_impl: tcx.trait_has_default_impl(def_id), + trait_ref: self.lazy(&trait_def.trait_ref), + super_predicates: self.lazy(&tcx.lookup_super_predicates(def_id)) + }; - // If this is a tuple-like struct, encode the type of the constructor. - if !struct_def.is_struct() { - encode_info_for_struct_ctor(ecx, rbml_w, item.name, struct_def, index, item.id); - } - } - hir::ItemDefaultImpl(unsafety, _) => { - let _task = index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, 'd'); - encode_name(rbml_w, item.name); - encode_unsafety(rbml_w, unsafety); - - let trait_ref = tcx.impl_trait_ref(ecx.tcx.map.local_def_id(item.id)).unwrap(); - encode_trait_ref(rbml_w, ecx, trait_ref, tag_item_trait_ref); - rbml_w.end_tag(); - } - hir::ItemImpl(unsafety, polarity, _, _, _, ref ast_items) => { - let _task = index.record(def_id, rbml_w); - - // We need to encode information about the default methods we - // have inherited, so we drive this based on the impl structure. - let impl_items = tcx.impl_items.borrow(); - let items = impl_items.get(&def_id).unwrap(); - - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, 'i'); - encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id); - encode_name(rbml_w, item.name); - encode_attributes(rbml_w, &item.attrs); - encode_unsafety(rbml_w, unsafety); - encode_polarity(rbml_w, polarity); - - match tcx.custom_coerce_unsized_kinds.borrow().get(&ecx.tcx.map.local_def_id(item.id)) { - Some(&kind) => { - rbml_w.start_tag(tag_impl_coerce_unsized_kind); - kind.encode(rbml_w); - rbml_w.end_tag(); + EntryKind::Trait(self.lazy(&data)) } - None => {} - } + hir::ItemExternCrate(_) | hir::ItemUse(_) => { + bug!("cannot encode info for item {:?}", item) + } + }; - for &item_def_id in items { - rbml_w.start_tag(tag_item_impl_item); - match item_def_id { - ty::ConstTraitItemId(item_def_id) => { - encode_def_id(rbml_w, item_def_id); - encode_item_sort(rbml_w, 'C'); + Entry { + kind: kind, + visibility: item.vis.simplify(), + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(&item.attrs), + children: match item.node { + hir::ItemForeignMod(ref fm) => { + self.lazy_seq(fm.items.iter().map(|foreign_item| { + tcx.map.local_def_id(foreign_item.id).index + })) } - ty::MethodTraitItemId(item_def_id) => { - encode_def_id(rbml_w, item_def_id); - encode_item_sort(rbml_w, 'r'); + hir::ItemEnum(..) 
=> { + let def = self.tcx.lookup_adt_def(def_id); + self.lazy_seq(def.variants.iter().map(|v| { + assert!(v.did.is_local()); + v.did.index + })) } - ty::TypeTraitItemId(item_def_id) => { - encode_def_id(rbml_w, item_def_id); - encode_item_sort(rbml_w, 't'); + hir::ItemStruct(..) | + hir::ItemUnion(..) => { + let def = self.tcx.lookup_adt_def(def_id); + self.lazy_seq(def.struct_variant().fields.iter().map(|f| { + assert!(f.did.is_local()); + f.did.index + })) } - } - rbml_w.end_tag(); - } - let did = ecx.tcx.map.local_def_id(item.id); - if let Some(trait_ref) = tcx.impl_trait_ref(did) { - encode_trait_ref(rbml_w, ecx, trait_ref, tag_item_trait_ref); - - let trait_def = tcx.lookup_trait_def(trait_ref.def_id); - let parent = trait_def.ancestors(did) - .skip(1) - .next() - .and_then(|node| match node { - specialization_graph::Node::Impl(parent) => Some(parent), - _ => None, - }); - encode_parent_impl(rbml_w, parent); - } - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - rbml_w.end_tag(); - - // Iterate down the trait items, emitting them. We rely on the - // assumption that all of the actually implemented trait items - // appear first in the impl structure, in the same order they do - // in the ast. This is a little sketchy. - let num_implemented_methods = ast_items.len(); - for (i, &trait_item_def_id) in items.iter().enumerate() { - let ast_item = if i < num_implemented_methods { - Some(&ast_items[i]) - } else { - None - }; - - match tcx.impl_or_trait_item(trait_item_def_id.def_id()) { - ty::ConstTraitItem(ref associated_const) => { - encode_info_for_associated_const(ecx, - rbml_w, - index, - &associated_const, - item.id, - ast_item) + hir::ItemImpl(..) | + hir::ItemTrait(..) => { + self.lazy_seq(tcx.impl_or_trait_items(def_id).iter().map(|&def_id| { + assert!(def_id.is_local()); + def_id.index + })) } - ty::MethodTraitItem(ref method_type) => { - encode_info_for_method(ecx, - rbml_w, - index, - &method_type, - false, - item.id, - ast_item) + _ => LazySeq::empty() + }, + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: match item.node { + hir::ItemStatic(..) | + hir::ItemConst(..) | + hir::ItemFn(..) | + hir::ItemTy(..) | + hir::ItemEnum(..) | + hir::ItemStruct(..) | + hir::ItemUnion(..) | + hir::ItemImpl(..) => { + Some(self.encode_item_type(def_id)) } - ty::TypeTraitItem(ref associated_type) => { - encode_info_for_associated_type(ecx, - rbml_w, - index, - &associated_type, - item.id, - ast_item) + _ => None + }, + inherent_impls: self.encode_inherent_implementations(def_id), + variances: match item.node { + hir::ItemEnum(..) | + hir::ItemStruct(..) | + hir::ItemUnion(..) | + hir::ItemTrait(..) 
=> { + self.encode_item_variances(def_id) } - } - } - } - hir::ItemTrait(_, _, _, ref ms) => { - let _task = index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, 'I'); - encode_item_variances(rbml_w, ecx, item.id); - let trait_def = tcx.lookup_trait_def(def_id); - let trait_predicates = tcx.lookup_predicates(def_id); - encode_unsafety(rbml_w, trait_def.unsafety); - encode_paren_sugar(rbml_w, trait_def.paren_sugar); - encode_defaulted(rbml_w, tcx.trait_has_default_impl(def_id)); - encode_associated_type_names(rbml_w, &trait_def.associated_type_names); - encode_generics(rbml_w, ecx, index, - &trait_def.generics, &trait_predicates, - tag_item_generics); - encode_predicates(rbml_w, ecx, index, - &tcx.lookup_super_predicates(def_id), - tag_item_super_predicates); - encode_trait_ref(rbml_w, ecx, trait_def.trait_ref, tag_item_trait_ref); - encode_name(rbml_w, item.name); - encode_attributes(rbml_w, &item.attrs); - encode_visibility(rbml_w, vis); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - for &method_def_id in tcx.trait_item_def_ids(def_id).iter() { - rbml_w.start_tag(tag_item_trait_item); - match method_def_id { - ty::ConstTraitItemId(const_def_id) => { - encode_def_id(rbml_w, const_def_id); - encode_item_sort(rbml_w, 'C'); + _ => LazySeq::empty() + }, + generics: match item.node { + hir::ItemStatic(..) | + hir::ItemConst(..) | + hir::ItemFn(..) | + hir::ItemTy(..) | + hir::ItemEnum(..) | + hir::ItemStruct(..) | + hir::ItemUnion(..) | + hir::ItemImpl(..) | + hir::ItemTrait(..) => { + Some(self.encode_generics(def_id)) + } + _ => None + }, + predicates: match item.node { + hir::ItemStatic(..) | + hir::ItemConst(..) | + hir::ItemFn(..) | + hir::ItemTy(..) | + hir::ItemEnum(..) | + hir::ItemStruct(..) | + hir::ItemUnion(..) | + hir::ItemImpl(..) | + hir::ItemTrait(..) => { + Some(self.encode_predicates(def_id)) + } + _ => None + }, + + ast: match item.node { + hir::ItemConst(..) | + hir::ItemFn(_, _, hir::Constness::Const, ..) => { + Some(self.encode_inlined_item(InlinedItemRef::Item(def_id, item))) } - ty::MethodTraitItemId(method_def_id) => { - encode_def_id(rbml_w, method_def_id); - encode_item_sort(rbml_w, 'r'); + _ => None + }, + mir: match item.node { + hir::ItemConst(..) => { + self.encode_mir(def_id) } - ty::TypeTraitItemId(type_def_id) => { - encode_def_id(rbml_w, type_def_id); - encode_item_sort(rbml_w, 't'); + hir::ItemFn(_, _, constness, _, ref generics, _) => { + let tps_len = generics.ty_params.len(); + let needs_inline = tps_len > 0 || attr::requests_inline(&item.attrs); + if needs_inline || constness == hir::Constness::Const { + self.encode_mir(def_id) + } else { + None + } } + _ => None } - rbml_w.end_tag(); - - rbml_w.wr_tagged_u64(tag_mod_child, - def_to_u64(method_def_id.def_id())); } + } +} - // Encode inherent implementations for this trait. - encode_inherent_implementations(ecx, rbml_w, def_id); - - rbml_w.end_tag(); - - // Now output the trait item info for each trait item. 
- let r = tcx.trait_item_def_ids(def_id); - for (i, &item_def_id) in r.iter().enumerate() { - assert_eq!(item_def_id.def_id().krate, LOCAL_CRATE); - - let _task = index.record(item_def_id.def_id(), rbml_w); - rbml_w.start_tag(tag_items_data_item); - - encode_parent_item(rbml_w, def_id); - - let stab = tcx.lookup_stability(item_def_id.def_id()); - let depr = tcx.lookup_deprecation(item_def_id.def_id()); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - - let trait_item_type = - tcx.impl_or_trait_item(item_def_id.def_id()); - let is_nonstatic_method; - match trait_item_type { - ty::ConstTraitItem(associated_const) => { - encode_name(rbml_w, associated_const.name); - encode_def_id_and_key(ecx, rbml_w, associated_const.def_id); - encode_visibility(rbml_w, associated_const.vis); - - encode_family(rbml_w, 'C'); - - encode_bounds_and_type_for_item(rbml_w, ecx, index, - ecx.local_id(associated_const.def_id)); - - is_nonstatic_method = false; +impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { + /// In some cases, along with the item itself, we also + /// encode some sub-items. Usually we want some info from the item + /// so it's easier to do that here then to wait until we would encounter + /// normally in the visitor walk. + fn encode_addl_info_for_item(&mut self, + item: &hir::Item) { + let def_id = self.tcx.map.local_def_id(item.id); + match item.node { + hir::ItemStatic(..) | + hir::ItemConst(..) | + hir::ItemFn(..) | + hir::ItemMod(..) | + hir::ItemForeignMod(..) | + hir::ItemExternCrate(..) | + hir::ItemUse(..) | + hir::ItemDefaultImpl(..) | + hir::ItemTy(..) => { + // no sub-item recording needed in these cases + } + hir::ItemEnum(..) => { + self.encode_fields(def_id); + + let def = self.tcx.lookup_adt_def(def_id); + for (i, variant) in def.variants.iter().enumerate() { + self.record(variant.did, + EncodeContext::encode_enum_variant_info, + (def_id, Untracked(i))); } - ty::MethodTraitItem(method_ty) => { - let method_def_id = item_def_id.def_id(); - - encode_method_ty_fields(ecx, rbml_w, index, &method_ty); + } + hir::ItemStruct(ref struct_def, _) => { + self.encode_fields(def_id); - match method_ty.explicit_self { - ty::ExplicitSelfCategory::Static => { - encode_family(rbml_w, - STATIC_METHOD_FAMILY); - } - _ => { - encode_family(rbml_w, - METHOD_FAMILY); - } + // If this is a tuple-like struct, encode the type of the constructor. + match self.tcx.lookup_adt_def(def_id).struct_variant().kind { + ty::VariantKind::Struct => { + // no value for structs like struct Foo { ... 
} } - encode_bounds_and_type_for_item(rbml_w, ecx, index, - ecx.local_id(method_def_id)); - - is_nonstatic_method = method_ty.explicit_self != - ty::ExplicitSelfCategory::Static; - } - ty::TypeTraitItem(associated_type) => { - encode_name(rbml_w, associated_type.name); - encode_def_id_and_key(ecx, rbml_w, associated_type.def_id); - encode_item_sort(rbml_w, 't'); - encode_family(rbml_w, 'y'); - - if let Some(ty) = associated_type.ty { - encode_type(ecx, rbml_w, ty); + ty::VariantKind::Tuple | ty::VariantKind::Unit => { + // there is a value for structs like `struct + // Foo()` and `struct Foo` + let ctor_def_id = self.tcx.map.local_def_id(struct_def.id()); + self.record(ctor_def_id, + EncodeContext::encode_struct_ctor, + (def_id, ctor_def_id)); } - - is_nonstatic_method = false; } } - - let trait_item = &ms[i]; - encode_attributes(rbml_w, &trait_item.attrs); - match trait_item.node { - hir::ConstTraitItem(_, ref default) => { - if default.is_some() { - encode_item_sort(rbml_w, 'C'); - } else { - encode_item_sort(rbml_w, 'c'); - } - - encode_inlined_item(ecx, rbml_w, - InlinedItemRef::TraitItem(def_id, trait_item)); - encode_mir(ecx, rbml_w, trait_item.id); + hir::ItemUnion(..) => { + self.encode_fields(def_id); + } + hir::ItemImpl(..) => { + for &trait_item_def_id in &self.tcx.impl_or_trait_items(def_id)[..] { + self.record(trait_item_def_id, + EncodeContext::encode_info_for_impl_item, + trait_item_def_id); } - hir::MethodTraitItem(ref sig, ref body) => { - // If this is a static method, we've already - // encoded this. - if is_nonstatic_method { - // FIXME: I feel like there is something funny - // going on. - encode_bounds_and_type_for_item(rbml_w, ecx, index, - ecx.local_id(item_def_id.def_id())); - } - - if body.is_some() { - encode_item_sort(rbml_w, 'p'); - encode_inlined_item(ecx, rbml_w, - InlinedItemRef::TraitItem(def_id, trait_item)); - encode_mir(ecx, rbml_w, trait_item.id); - } else { - encode_item_sort(rbml_w, 'r'); - } - encode_method_argument_names(rbml_w, &sig.decl); + } + hir::ItemTrait(..) => { + for &item_def_id in &self.tcx.impl_or_trait_items(def_id)[..] { + self.record(item_def_id, + EncodeContext::encode_info_for_trait_item, + item_def_id); } - - hir::TypeTraitItem(..) 
=> {} } - - rbml_w.end_tag(); } - } - hir::ItemExternCrate(_) | hir::ItemUse(_) => { - // these are encoded separately - } } } -fn encode_info_for_foreign_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - nitem: &hir::ForeignItem, - index: &mut CrateIndex<'a, 'tcx>) { - debug!("writing foreign item {}", ecx.tcx.node_path_str(nitem.id)); - let def_id = ecx.tcx.map.local_def_id(nitem.id); - let abi = ecx.tcx.map.get_foreign_abi(nitem.id); - - let _task = index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - let parent_id = ecx.tcx.map.get_parent(nitem.id); - encode_parent_item(rbml_w, ecx.tcx.map.local_def_id(parent_id)); - encode_visibility(rbml_w, &nitem.vis); - match nitem.node { - hir::ForeignItemFn(ref fndecl, _) => { - encode_family(rbml_w, FN_FAMILY); - encode_bounds_and_type_for_item(rbml_w, ecx, index, nitem.id); - encode_name(rbml_w, nitem.name); - if abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { - encode_inlined_item(ecx, rbml_w, InlinedItemRef::Foreign(def_id, nitem)); - encode_mir(ecx, rbml_w, nitem.id); - } - encode_attributes(rbml_w, &nitem.attrs); - let stab = ecx.tcx.lookup_stability(ecx.tcx.map.local_def_id(nitem.id)); - let depr = ecx.tcx.lookup_deprecation(ecx.tcx.map.local_def_id(nitem.id)); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - encode_method_argument_names(rbml_w, &fndecl); - } - hir::ForeignItemStatic(_, mutbl) => { - if mutbl { - encode_family(rbml_w, 'b'); - } else { - encode_family(rbml_w, 'c'); - } - encode_bounds_and_type_for_item(rbml_w, ecx, index, nitem.id); - encode_attributes(rbml_w, &nitem.attrs); - let stab = ecx.tcx.lookup_stability(ecx.tcx.map.local_def_id(nitem.id)); - let depr = ecx.tcx.lookup_deprecation(ecx.tcx.map.local_def_id(nitem.id)); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - encode_name(rbml_w, nitem.name); - } - } - rbml_w.end_tag(); -} - -fn my_visit_expr(expr: &hir::Expr, - rbml_w: &mut Encoder, - ecx: &EncodeContext, - index: &mut CrateIndex) { - match expr.node { - hir::ExprClosure(..) 
=> { - let def_id = ecx.tcx.map.local_def_id(expr.id); - - let _task = index.record(def_id, rbml_w); - - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_name(rbml_w, syntax::parse::token::intern("")); - - rbml_w.start_tag(tag_items_closure_ty); - write_closure_type(ecx, rbml_w, &ecx.tcx.tables.borrow().closure_tys[&def_id]); - rbml_w.end_tag(); +impl<'a, 'tcx> EncodeContext<'a, 'tcx> { + fn encode_info_for_foreign_item(&mut self, + (def_id, nitem): (DefId, &hir::ForeignItem)) + -> Entry<'tcx> { + let tcx = self.tcx; - rbml_w.start_tag(tag_items_closure_kind); - ecx.tcx.closure_kind(def_id).encode(rbml_w).unwrap(); - rbml_w.end_tag(); + debug!("writing foreign item {}", tcx.node_path_str(nitem.id)); - assert!(ecx.mir_map.map.contains_key(&def_id)); - encode_mir(ecx, rbml_w, expr.id); + let kind = match nitem.node { + hir::ForeignItemFn(ref fndecl, _) => { + let data = FnData { + constness: hir::Constness::NotConst, + arg_names: self.encode_fn_arg_names(&fndecl) + }; + EntryKind::ForeignFn(self.lazy(&data)) + } + hir::ForeignItemStatic(_, true) => EntryKind::ForeignMutStatic, + hir::ForeignItemStatic(_, false) => EntryKind::ForeignImmStatic + }; - rbml_w.end_tag(); + Entry { + kind: kind, + visibility: nitem.vis.simplify(), + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(&nitem.attrs), + children: LazySeq::empty(), + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: Some(self.encode_item_type(def_id)), + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: Some(self.encode_generics(def_id)), + predicates: Some(self.encode_predicates(def_id)), + + ast: None, + mir: None } - _ => { } } } -struct EncodeVisitor<'a, 'b:'a, 'c:'a, 'tcx:'c> { - rbml_w_for_visit_item: &'a mut Encoder<'b>, - ecx: &'a EncodeContext<'c, 'tcx>, - index: &'a mut CrateIndex<'c, 'tcx>, +struct EncodeVisitor<'a, 'b: 'a, 'tcx: 'b> { + index: IndexBuilder<'a, 'b, 'tcx>, } -impl<'a, 'b, 'c, 'tcx> Visitor<'tcx> for EncodeVisitor<'a, 'b, 'c, 'tcx> { +impl<'a, 'b, 'tcx> Visitor<'tcx> for EncodeVisitor<'a, 'b, 'tcx> { fn visit_expr(&mut self, ex: &'tcx hir::Expr) { intravisit::walk_expr(self, ex); - my_visit_expr(ex, self.rbml_w_for_visit_item, self.ecx, self.index); + self.index.encode_info_for_expr(ex); } - fn visit_item(&mut self, i: &'tcx hir::Item) { - intravisit::walk_item(self, i); - encode_info_for_item(self.ecx, self.rbml_w_for_visit_item, i, self.index); + fn visit_item(&mut self, item: &'tcx hir::Item) { + intravisit::walk_item(self, item); + let def_id = self.index.tcx.map.local_def_id(item.id); + match item.node { + hir::ItemExternCrate(_) | hir::ItemUse(_) => (), // ignore these + _ => self.index.record(def_id, + EncodeContext::encode_info_for_item, + (def_id, item)), + } + self.index.encode_addl_info_for_item(item); } fn visit_foreign_item(&mut self, ni: &'tcx hir::ForeignItem) { intravisit::walk_foreign_item(self, ni); - encode_info_for_foreign_item(self.ecx, self.rbml_w_for_visit_item, ni, self.index); + let def_id = self.index.tcx.map.local_def_id(ni.id); + self.index.record(def_id, + EncodeContext::encode_info_for_foreign_item, + (def_id, ni)); } fn visit_ty(&mut self, ty: &'tcx hir::Ty) { intravisit::walk_ty(self, ty); - - if let hir::TyImplTrait(_) = ty.node { - let rbml_w = &mut *self.rbml_w_for_visit_item; - let def_id = self.ecx.tcx.map.local_def_id(ty.id); - let _task = self.index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - 
encode_def_id_and_key(self.ecx, rbml_w, def_id); - encode_family(rbml_w, 'y'); - encode_bounds_and_type_for_item(rbml_w, self.ecx, self.index, ty.id); - rbml_w.end_tag(); - } + self.index.encode_info_for_ty(ty); } } -fn encode_info_for_items<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder) - -> CrateIndex<'a, 'tcx> { - let krate = ecx.tcx.map.krate(); - - let mut index = CrateIndex { - dep_graph: &ecx.tcx.dep_graph, - items: IndexData::new(ecx.tcx.map.num_local_def_ids()), - xrefs: FnvHashMap() - }; - rbml_w.start_tag(tag_items_data); - - { - let _task = index.record(DefId::local(CRATE_DEF_INDEX), rbml_w); - encode_info_for_mod(ecx, - rbml_w, - &krate.module, - &[], - CRATE_NODE_ID, - syntax::parse::token::intern(&ecx.link_meta.crate_name), - &hir::Public); - } - - krate.visit_all_items(&mut EncodeVisitor { - index: &mut index, - ecx: ecx, - rbml_w_for_visit_item: &mut *rbml_w, - }); - - rbml_w.end_tag(); - index -} - -fn encode_item_index(rbml_w: &mut Encoder, index: IndexData) { - rbml_w.start_tag(tag_index); - index.write_index(rbml_w.writer); - rbml_w.end_tag(); -} - -fn encode_meta_item(rbml_w: &mut Encoder, mi: &ast::MetaItem) { - if mi.is_word() { - let name = mi.name(); - rbml_w.start_tag(tag_meta_item_word); - rbml_w.wr_tagged_str(tag_meta_item_name, &name); - rbml_w.end_tag(); - } else if mi.is_value_str() { - let name = mi.name(); - /* FIXME (#623): support other literal kinds */ - let value = mi.value_str().unwrap(); - rbml_w.start_tag(tag_meta_item_name_value); - rbml_w.wr_tagged_str(tag_meta_item_name, &name); - rbml_w.wr_tagged_str(tag_meta_item_value, &value); - rbml_w.end_tag(); - } else { // it must be a list - let name = mi.name(); - let items = mi.meta_item_list().unwrap(); - rbml_w.start_tag(tag_meta_item_list); - rbml_w.wr_tagged_str(tag_meta_item_name, &name); - for inner_item in items { - encode_meta_item(rbml_w, &inner_item); +impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { + fn encode_info_for_ty(&mut self, ty: &hir::Ty) { + if let hir::TyImplTrait(_) = ty.node { + let def_id = self.tcx.map.local_def_id(ty.id); + self.record(def_id, + EncodeContext::encode_info_for_anon_ty, + def_id); } - rbml_w.end_tag(); } -} -fn encode_attributes(rbml_w: &mut Encoder, attrs: &[ast::Attribute]) { - rbml_w.start_tag(tag_attributes); - for attr in attrs { - rbml_w.start_tag(tag_attribute); - rbml_w.wr_tagged_u8(tag_attribute_is_sugared_doc, attr.node.is_sugared_doc as u8); - encode_meta_item(rbml_w, attr.meta()); - rbml_w.end_tag(); + fn encode_info_for_expr(&mut self, expr: &hir::Expr) { + match expr.node { + hir::ExprClosure(..) 
=> { + let def_id = self.tcx.map.local_def_id(expr.id); + self.record(def_id, + EncodeContext::encode_info_for_closure, + def_id); + } + _ => { } + } } - rbml_w.end_tag(); -} - -fn encode_unsafety(rbml_w: &mut Encoder, unsafety: hir::Unsafety) { - let byte: u8 = match unsafety { - hir::Unsafety::Normal => 0, - hir::Unsafety::Unsafe => 1, - }; - rbml_w.wr_tagged_u8(tag_unsafety, byte); -} - -fn encode_paren_sugar(rbml_w: &mut Encoder, paren_sugar: bool) { - let byte: u8 = if paren_sugar {1} else {0}; - rbml_w.wr_tagged_u8(tag_paren_sugar, byte); } -fn encode_defaulted(rbml_w: &mut Encoder, is_defaulted: bool) { - let byte: u8 = if is_defaulted {1} else {0}; - rbml_w.wr_tagged_u8(tag_defaulted_trait, byte); -} - -fn encode_associated_type_names(rbml_w: &mut Encoder, names: &[Name]) { - rbml_w.start_tag(tag_associated_type_names); - for &name in names { - rbml_w.wr_tagged_str(tag_associated_type_name, &name.as_str()); +impl<'a, 'tcx> EncodeContext<'a, 'tcx> { + fn encode_info_for_anon_ty(&mut self, def_id: DefId) -> Entry<'tcx> { + Entry { + kind: EntryKind::Type, + visibility: ty::Visibility::Public, + def_key: self.encode_def_key(def_id), + attributes: LazySeq::empty(), + children: LazySeq::empty(), + stability: None, + deprecation: None, + + ty: Some(self.encode_item_type(def_id)), + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: Some(self.encode_generics(def_id)), + predicates: Some(self.encode_predicates(def_id)), + + ast: None, + mir: None + } } - rbml_w.end_tag(); -} - -fn encode_polarity(rbml_w: &mut Encoder, polarity: hir::ImplPolarity) { - let byte: u8 = match polarity { - hir::ImplPolarity::Positive => 0, - hir::ImplPolarity::Negative => 1, - }; - rbml_w.wr_tagged_u8(tag_polarity, byte); -} -fn encode_crate_deps(rbml_w: &mut Encoder, cstore: &cstore::CStore) { - fn get_ordered_deps(cstore: &cstore::CStore) - -> Vec<(CrateNum, Rc)> { - // Pull the cnums and name,vers,hash out of cstore - let mut deps = Vec::new(); - cstore.iter_crate_data(|cnum, val| { - deps.push((cnum, val.clone())); - }); + fn encode_info_for_closure(&mut self, def_id: DefId) -> Entry<'tcx> { + let tcx = self.tcx; - // Sort by cnum - deps.sort_by(|kv1, kv2| kv1.0.cmp(&kv2.0)); + let data = ClosureData { + kind: tcx.closure_kind(def_id), + ty: self.lazy(&tcx.tables.borrow().closure_tys[&def_id]) + }; - // Sanity-check the crate numbers - let mut expected_cnum = 1; - for &(n, _) in &deps { - assert_eq!(n, expected_cnum); - expected_cnum += 1; + Entry { + kind: EntryKind::Closure(self.lazy(&data)), + visibility: ty::Visibility::Public, + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(&tcx.get_attrs(def_id)), + children: LazySeq::empty(), + stability: None, + deprecation: None, + + ty: None, + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: None, + predicates: None, + + ast: None, + mir: self.encode_mir(def_id) } - - deps } - // We're just going to write a list of crate 'name-hash-version's, with - // the assumption that they are numbered 1 to n. - // FIXME (#2166): This is not nearly enough to support correct versioning - // but is enough to get transitive crate dependencies working. 
- rbml_w.start_tag(tag_crate_deps); - for (_cnum, dep) in get_ordered_deps(cstore) { - encode_crate_dep(rbml_w, &dep); + fn encode_info_for_items(&mut self) -> Index { + let krate = self.tcx.map.krate(); + let mut index = IndexBuilder::new(self); + index.record(DefId::local(CRATE_DEF_INDEX), + EncodeContext::encode_info_for_mod, + FromId(CRATE_NODE_ID, (&krate.module, &krate.attrs, &hir::Public))); + let mut visitor = EncodeVisitor { + index: index, + }; + krate.visit_all_items(&mut visitor); + visitor.index.into_items() } - rbml_w.end_tag(); -} -fn encode_lang_items(ecx: &EncodeContext, rbml_w: &mut Encoder) { - rbml_w.start_tag(tag_lang_items); + fn encode_attributes(&mut self, attrs: &[ast::Attribute]) -> LazySeq { + self.lazy_seq_ref(attrs) + } - for (i, &opt_def_id) in ecx.tcx.lang_items.items().iter().enumerate() { - if let Some(def_id) = opt_def_id { - if def_id.is_local() { - rbml_w.start_tag(tag_lang_items_item); - rbml_w.wr_tagged_u32(tag_lang_items_item_id, i as u32); - rbml_w.wr_tagged_u32(tag_lang_items_item_index, def_id.index.as_u32()); - rbml_w.end_tag(); + fn encode_crate_deps(&mut self) -> LazySeq { + fn get_ordered_deps(cstore: &cstore::CStore) + -> Vec<(CrateNum, Rc)> { + // Pull the cnums and name,vers,hash out of cstore + let mut deps = Vec::new(); + cstore.iter_crate_data(|cnum, val| { + deps.push((cnum, val.clone())); + }); + + // Sort by cnum + deps.sort_by(|kv1, kv2| kv1.0.cmp(&kv2.0)); + + // Sanity-check the crate numbers + let mut expected_cnum = 1; + for &(n, _) in &deps { + assert_eq!(n, CrateNum::new(expected_cnum)); + expected_cnum += 1; } + + deps } - } - for i in &ecx.tcx.lang_items.missing { - rbml_w.wr_tagged_u32(tag_lang_items_missing, *i as u32); + // We're just going to write a list of crate 'name-hash-version's, with + // the assumption that they are numbered 1 to n. + // FIXME (#2166): This is not nearly enough to support correct versioning + // but is enough to get transitive crate dependencies working. 
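+ // Each CrateDep below carries only the dependency's name, hash and
+ // explicitly_linked flag; the crate number itself is implied by the entry's
+ // position in the sequence (the 1..n ordering checked in get_ordered_deps above).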
+ let deps = get_ordered_deps(self.cstore); + self.lazy_seq(deps.iter().map(|&(_, ref dep)| { + CrateDep { + name: syntax::parse::token::intern(dep.name()), + hash: dep.hash(), + explicitly_linked: dep.explicitly_linked.get() + } + })) } - rbml_w.end_tag(); // tag_lang_items -} - -fn encode_native_libraries(ecx: &EncodeContext, rbml_w: &mut Encoder) { - rbml_w.start_tag(tag_native_libraries); - - for &(ref lib, kind) in ecx.tcx.sess.cstore.used_libraries().iter() { - match kind { - cstore::NativeStatic => {} // these libraries are not propagated - cstore::NativeFramework | cstore::NativeUnknown => { - rbml_w.start_tag(tag_native_libraries_lib); - rbml_w.wr_tagged_u32(tag_native_libraries_kind, kind as u32); - rbml_w.wr_tagged_str(tag_native_libraries_name, lib); - rbml_w.end_tag(); + fn encode_lang_items(&mut self) + -> (LazySeq<(DefIndex, usize)>, LazySeq) { + let tcx = self.tcx; + let lang_items = tcx.lang_items.items().iter(); + (self.lazy_seq(lang_items.enumerate().filter_map(|(i, &opt_def_id)| { + if let Some(def_id) = opt_def_id { + if def_id.is_local() { + return Some((def_id.index, i)); + } } - } + None + })), self.lazy_seq_ref(&tcx.lang_items.missing)) } - rbml_w.end_tag(); -} - -fn encode_plugin_registrar_fn(ecx: &EncodeContext, rbml_w: &mut Encoder) { - match ecx.tcx.sess.plugin_registrar_fn.get() { - Some(id) => { - let def_id = ecx.tcx.map.local_def_id(id); - rbml_w.wr_tagged_u32(tag_plugin_registrar_fn, def_id.index.as_u32()); - } - None => {} + fn encode_native_libraries(&mut self) -> LazySeq<(NativeLibraryKind, String)> { + let used_libraries = self.tcx.sess.cstore.used_libraries(); + self.lazy_seq(used_libraries.into_iter().filter_map(|(lib, kind)| { + match kind { + cstore::NativeStatic => None, // these libraries are not propagated + cstore::NativeFramework | cstore::NativeUnknown => { + Some((kind, lib)) + } + } + })) } -} - -fn encode_codemap(ecx: &EncodeContext, rbml_w: &mut Encoder) { - rbml_w.start_tag(tag_codemap); - let codemap = ecx.tcx.sess.codemap(); - - for filemap in &codemap.files.borrow()[..] { - if filemap.lines.borrow().is_empty() || filemap.is_imported() { + fn encode_codemap(&mut self) -> LazySeq { + let codemap = self.tcx.sess.codemap(); + let all_filemaps = codemap.files.borrow(); + self.lazy_seq_ref(all_filemaps.iter().filter(|filemap| { // No need to export empty filemaps, as they can't contain spans // that need translation. // Also no need to re-export imported filemaps, as any downstream // crate will import them from their original source. 
- continue; - } - - rbml_w.start_tag(tag_codemap_filemap); - rbml_w.emit_opaque(|opaque_encoder| { - filemap.encode(opaque_encoder) - }).unwrap(); - rbml_w.end_tag(); + !filemap.lines.borrow().is_empty() && !filemap.is_imported() + }).map(|filemap| &**filemap)) } - rbml_w.end_tag(); -} - -/// Serialize the text of the exported macros -fn encode_macro_defs(rbml_w: &mut Encoder, - krate: &hir::Crate) { - rbml_w.start_tag(tag_macro_defs); - for def in &krate.exported_macros { - rbml_w.start_tag(tag_macro_def); - - encode_name(rbml_w, def.name); - encode_attributes(rbml_w, &def.attrs); - let &BytePos(lo) = &def.span.lo; - let &BytePos(hi) = &def.span.hi; - rbml_w.wr_tagged_u32(tag_macro_def_span_lo, lo); - rbml_w.wr_tagged_u32(tag_macro_def_span_hi, hi); - - rbml_w.wr_tagged_str(tag_macro_def_body, - &::syntax::print::pprust::tts_to_string(&def.body)); - - rbml_w.end_tag(); - } - rbml_w.end_tag(); -} - -fn encode_struct_field_attrs(ecx: &EncodeContext, - rbml_w: &mut Encoder, - krate: &hir::Crate) { - struct StructFieldVisitor<'a, 'b:'a, 'c:'a, 'tcx:'b> { - ecx: &'a EncodeContext<'b, 'tcx>, - rbml_w: &'a mut Encoder<'c>, - } - - impl<'a, 'b, 'c, 'tcx, 'v> Visitor<'v> for StructFieldVisitor<'a, 'b, 'c, 'tcx> { - fn visit_struct_field(&mut self, field: &hir::StructField) { - self.rbml_w.start_tag(tag_struct_field); - let def_id = self.ecx.tcx.map.local_def_id(field.id); - encode_def_id(self.rbml_w, def_id); - encode_attributes(self.rbml_w, &field.attrs); - self.rbml_w.end_tag(); - } + /// Serialize the text of the exported macros + fn encode_macro_defs(&mut self) -> LazySeq { + let tcx = self.tcx; + self.lazy_seq(tcx.map.krate().exported_macros.iter().map(|def| { + MacroDef { + name: def.name, + attrs: def.attrs.to_vec(), + span: def.span, + body: ::syntax::print::pprust::tts_to_string(&def.body) + } + })) } - - rbml_w.start_tag(tag_struct_fields); - krate.visit_all_items(&mut StructFieldVisitor { ecx: ecx, rbml_w: rbml_w }); - rbml_w.end_tag(); } - - struct ImplVisitor<'a, 'tcx:'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, - impls: FnvHashMap> + impls: FnvHashMap> } impl<'a, 'tcx, 'v> Visitor<'v> for ImplVisitor<'a, 'tcx> { @@ -1681,333 +1185,221 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ImplVisitor<'a, 'tcx> { if let Some(trait_ref) = self.tcx.impl_trait_ref(impl_id) { self.impls.entry(trait_ref.def_id) .or_insert(vec![]) - .push(impl_id); + .push(impl_id.index); } } } } -/// Encodes an index, mapping each trait to its (local) implementations. -fn encode_impls<'a>(ecx: &'a EncodeContext, - krate: &hir::Crate, - rbml_w: &'a mut Encoder) { - let mut visitor = ImplVisitor { - tcx: ecx.tcx, - impls: FnvHashMap() - }; - krate.visit_all_items(&mut visitor); - - rbml_w.start_tag(tag_impls); - for (trait_, trait_impls) in visitor.impls { - rbml_w.start_tag(tag_impls_trait); - encode_def_id(rbml_w, trait_); - for impl_ in trait_impls { - rbml_w.wr_tagged_u64(tag_impls_trait_impl, def_to_u64(impl_)); - } - rbml_w.end_tag(); - } - rbml_w.end_tag(); -} - -fn encode_misc_info(ecx: &EncodeContext, - krate: &hir::Crate, - rbml_w: &mut Encoder) { - rbml_w.start_tag(tag_misc_info); - rbml_w.start_tag(tag_misc_info_crate_items); - for item_id in &krate.module.item_ids { - rbml_w.wr_tagged_u64(tag_mod_child, - def_to_u64(ecx.tcx.map.local_def_id(item_id.id))); - - let item = ecx.tcx.map.expect_item(item_id.id); - each_auxiliary_node_id(item, |auxiliary_node_id| { - rbml_w.wr_tagged_u64(tag_mod_child, - def_to_u64(ecx.tcx.map.local_def_id(auxiliary_node_id))); - true - }); - } - - // Encode reexports for the root module. 
- encode_reexports(ecx, rbml_w, 0); +impl<'a, 'tcx> EncodeContext<'a, 'tcx> { + /// Encodes an index, mapping each trait to its (local) implementations. + fn encode_impls(&mut self) -> LazySeq { + let mut visitor = ImplVisitor { + tcx: self.tcx, + impls: FnvHashMap() + }; + self.tcx.map.krate().visit_all_items(&mut visitor); - rbml_w.end_tag(); - rbml_w.end_tag(); -} + let all_impls: Vec<_> = visitor.impls.into_iter().map(|(trait_def_id, impls)| { + TraitImpls { + trait_id: (trait_def_id.krate.as_u32(), trait_def_id.index), + impls: self.lazy_seq(impls) + } + }).collect(); -// Encodes all reachable symbols in this crate into the metadata. -// -// This pass is seeded off the reachability list calculated in the -// middle::reachable module but filters out items that either don't have a -// symbol associated with them (they weren't translated) or if they're an FFI -// definition (as that's not defined in this crate). -fn encode_reachable(ecx: &EncodeContext, rbml_w: &mut Encoder) { - rbml_w.start_tag(tag_reachable_ids); - for &id in ecx.reachable { - let def_id = ecx.tcx.map.local_def_id(id); - rbml_w.wr_tagged_u32(tag_reachable_id, def_id.index.as_u32()); + self.lazy_seq(all_impls) } - rbml_w.end_tag(); -} - -fn encode_crate_dep(rbml_w: &mut Encoder, - dep: &cstore::CrateMetadata) { - rbml_w.start_tag(tag_crate_dep); - rbml_w.wr_tagged_str(tag_crate_dep_crate_name, &dep.name()); - let hash = decoder::get_crate_hash(dep.data()); - rbml_w.wr_tagged_u64(tag_crate_dep_hash, hash.as_u64()); - rbml_w.wr_tagged_u8(tag_crate_dep_explicitly_linked, - dep.explicitly_linked.get() as u8); - rbml_w.end_tag(); -} - -fn encode_hash(rbml_w: &mut Encoder, hash: &Svh) { - rbml_w.wr_tagged_u64(tag_crate_hash, hash.as_u64()); -} - -fn encode_rustc_version(rbml_w: &mut Encoder) { - rbml_w.wr_tagged_str(tag_rustc_version, &rustc_version()); -} - -fn encode_crate_name(rbml_w: &mut Encoder, crate_name: &str) { - rbml_w.wr_tagged_str(tag_crate_crate_name, crate_name); -} -fn encode_crate_disambiguator(rbml_w: &mut Encoder, crate_disambiguator: &str) { - rbml_w.wr_tagged_str(tag_crate_disambiguator, crate_disambiguator); -} - -fn encode_crate_triple(rbml_w: &mut Encoder, triple: &str) { - rbml_w.wr_tagged_str(tag_crate_triple, triple); -} - -fn encode_dylib_dependency_formats(rbml_w: &mut Encoder, ecx: &EncodeContext) { - let tag = tag_dylib_dependency_formats; - match ecx.tcx.sess.dependency_formats.borrow().get(&config::CrateTypeDylib) { - Some(arr) => { - let s = arr.iter().enumerate().filter_map(|(i, slot)| { - let kind = match *slot { - Linkage::NotLinked | - Linkage::IncludedFromDylib => return None, - Linkage::Dynamic => "d", - Linkage::Static => "s", - }; - Some(format!("{}:{}", i + 1, kind)) - }).collect::>(); - rbml_w.wr_tagged_str(tag, &s.join(",")); - } - None => { - rbml_w.wr_tagged_str(tag, ""); - } + // Encodes all reachable symbols in this crate into the metadata. + // + // This pass is seeded off the reachability list calculated in the + // middle::reachable module but filters out items that either don't have a + // symbol associated with them (they weren't translated) or if they're an FFI + // definition (as that's not defined in this crate). 
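+ // Only the DefIndex of each reachable item is recorded; the crate is
+ // implicitly the local one, since `reachable` is a set of local NodeIds.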
+ fn encode_reachable(&mut self) -> LazySeq { + let reachable = self.reachable; + let tcx = self.tcx; + self.lazy_seq(reachable.iter().map(|&id| tcx.map.local_def_id(id).index)) } -} -fn encode_panic_strategy(rbml_w: &mut Encoder, ecx: &EncodeContext) { - match ecx.tcx.sess.opts.cg.panic { - PanicStrategy::Unwind => { - rbml_w.wr_tagged_u8(tag_panic_strategy, b'U'); - } - PanicStrategy::Abort => { - rbml_w.wr_tagged_u8(tag_panic_strategy, b'A'); + fn encode_dylib_dependency_formats(&mut self) -> LazySeq> { + match self.tcx.sess.dependency_formats.borrow().get(&config::CrateTypeDylib) { + Some(arr) => { + self.lazy_seq(arr.iter().map(|slot| { + match *slot { + Linkage::NotLinked | + Linkage::IncludedFromDylib => None, + + Linkage::Dynamic => Some(LinkagePreference::RequireDynamic), + Linkage::Static => Some(LinkagePreference::RequireStatic), + } + })) + } + None => LazySeq::empty() } } -} -pub fn encode_metadata(ecx: EncodeContext, krate: &hir::Crate) -> Vec { - let mut wr = Cursor::new(Vec::new()); + fn encode_crate_root(&mut self) -> Lazy { + let mut i = self.position(); + let crate_deps = self.encode_crate_deps(); + let dylib_dependency_formats = self.encode_dylib_dependency_formats(); + let dep_bytes = self.position() - i; + + // Encode the language items. + i = self.position(); + let (lang_items, lang_items_missing) = self.encode_lang_items(); + let lang_item_bytes = self.position() - i; + + // Encode the native libraries used + i = self.position(); + let native_libraries = self.encode_native_libraries(); + let native_lib_bytes = self.position() - i; + + // Encode codemap + i = self.position(); + let codemap = self.encode_codemap(); + let codemap_bytes = self.position() - i; + + // Encode macro definitions + i = self.position(); + let macro_defs = self.encode_macro_defs(); + let macro_defs_bytes = self.position() - i; + + // Encode the def IDs of impls, for coherence checking. + i = self.position(); + let impls = self.encode_impls(); + let impl_bytes = self.position() - i; + + // Encode reachability info. + i = self.position(); + let reachable_ids = self.encode_reachable(); + let reachable_bytes = self.position() - i; + + // Encode and index the items. 
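+ // encode_info_for_items writes one Entry per definition; write_index (just
+ // below) then emits the DefIndex -> position table that lets individual
+ // entries be located lazily without decoding the whole blob.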
+ i = self.position(); + let items = self.encode_info_for_items(); + let item_bytes = self.position() - i; + + i = self.position(); + let index = items.write_index(&mut self.opaque.cursor); + let index_bytes = self.position() - i; + + let tcx = self.tcx; + let link_meta = self.link_meta; + let is_rustc_macro = tcx.sess.crate_types.borrow().contains(&CrateTypeRustcMacro); + let root = self.lazy(&CrateRoot { + rustc_version: RUSTC_VERSION.to_string(), + name: link_meta.crate_name.clone(), + triple: tcx.sess.opts.target_triple.clone(), + hash: link_meta.crate_hash, + disambiguator: tcx.sess.local_crate_disambiguator().to_string(), + panic_strategy: tcx.sess.opts.cg.panic.clone(), + plugin_registrar_fn: tcx.sess.plugin_registrar_fn.get().map(|id| { + tcx.map.local_def_id(id).index + }), + macro_derive_registrar: if is_rustc_macro { + let id = tcx.sess.derive_registrar_fn.get().unwrap(); + Some(tcx.map.local_def_id(id).index) + } else { + None + }, + + crate_deps: crate_deps, + dylib_dependency_formats: dylib_dependency_formats, + lang_items: lang_items, + lang_items_missing: lang_items_missing, + native_libraries: native_libraries, + codemap: codemap, + macro_defs: macro_defs, + impls: impls, + reachable_ids: reachable_ids, + index: index, + }); - { - let mut rbml_w = Encoder::new(&mut wr); - encode_metadata_inner(&mut rbml_w, &ecx, krate) - } + let total_bytes = self.position(); - // RBML compacts the encoded bytes whenever appropriate, - // so there are some garbages left after the end of the data. - let metalen = wr.seek(SeekFrom::Current(0)).unwrap() as usize; - let mut v = wr.into_inner(); - v.truncate(metalen); - assert_eq!(v.len(), metalen); - - // And here we run into yet another obscure archive bug: in which metadata - // loaded from archives may have trailing garbage bytes. Awhile back one of - // our tests was failing sporadically on the OSX 64-bit builders (both nopt - // and opt) by having rbml generate an out-of-bounds panic when looking at - // metadata. - // - // Upon investigation it turned out that the metadata file inside of an rlib - // (and ar archive) was being corrupted. Some compilations would generate a - // metadata file which would end in a few extra bytes, while other - // compilations would not have these extra bytes appended to the end. These - // extra bytes were interpreted by rbml as an extra tag, so they ended up - // being interpreted causing the out-of-bounds. - // - // The root cause of why these extra bytes were appearing was never - // discovered, and in the meantime the solution we're employing is to insert - // the length of the metadata to the start of the metadata. Later on this - // will allow us to slice the metadata to the precise length that we just - // generated regardless of trailing bytes that end up in it. - // - // We also need to store the metadata encoding version here, because - // rlibs don't have it. To get older versions of rustc to ignore - // this metadata, there are 4 zero bytes at the start, which are - // treated as a length of 0 by old compilers. 
- - let len = v.len(); - let mut result = vec![]; - result.push(0); - result.push(0); - result.push(0); - result.push(0); - result.extend(metadata_encoding_version.iter().cloned()); - result.push((len >> 24) as u8); - result.push((len >> 16) as u8); - result.push((len >> 8) as u8); - result.push((len >> 0) as u8); - result.extend(v); - result -} - -fn encode_metadata_inner(rbml_w: &mut Encoder, - ecx: &EncodeContext, - krate: &hir::Crate) { - struct Stats { - attr_bytes: u64, - dep_bytes: u64, - lang_item_bytes: u64, - native_lib_bytes: u64, - plugin_registrar_fn_bytes: u64, - codemap_bytes: u64, - macro_defs_bytes: u64, - impl_bytes: u64, - misc_bytes: u64, - item_bytes: u64, - index_bytes: u64, - xref_bytes: u64, - zero_bytes: u64, - total_bytes: u64, - } - let mut stats = Stats { - attr_bytes: 0, - dep_bytes: 0, - lang_item_bytes: 0, - native_lib_bytes: 0, - plugin_registrar_fn_bytes: 0, - codemap_bytes: 0, - macro_defs_bytes: 0, - impl_bytes: 0, - misc_bytes: 0, - item_bytes: 0, - index_bytes: 0, - xref_bytes: 0, - zero_bytes: 0, - total_bytes: 0, - }; - - encode_rustc_version(rbml_w); - encode_crate_name(rbml_w, &ecx.link_meta.crate_name); - encode_crate_triple(rbml_w, &ecx.tcx.sess.opts.target_triple); - encode_hash(rbml_w, &ecx.link_meta.crate_hash); - encode_crate_disambiguator(rbml_w, &ecx.tcx.sess.local_crate_disambiguator()); - encode_dylib_dependency_formats(rbml_w, &ecx); - encode_panic_strategy(rbml_w, &ecx); - - let mut i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_attributes(rbml_w, &krate.attrs); - stats.attr_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_crate_deps(rbml_w, ecx.cstore); - stats.dep_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode the language items. - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_lang_items(&ecx, rbml_w); - stats.lang_item_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode the native libraries used - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_native_libraries(&ecx, rbml_w); - stats.native_lib_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode the plugin registrar function - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_plugin_registrar_fn(&ecx, rbml_w); - stats.plugin_registrar_fn_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode codemap - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_codemap(&ecx, rbml_w); - stats.codemap_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode macro definitions - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_macro_defs(rbml_w, krate); - stats.macro_defs_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode the def IDs of impls, for coherence checking. - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_impls(&ecx, krate, rbml_w); - stats.impl_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode miscellaneous info. - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_misc_info(&ecx, krate, rbml_w); - encode_reachable(&ecx, rbml_w); - stats.misc_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode and index the items. 
- rbml_w.start_tag(tag_items); - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - let index = encode_info_for_items(&ecx, rbml_w); - stats.item_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - rbml_w.end_tag(); - - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_item_index(rbml_w, index.items); - stats.index_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_xrefs(&ecx, rbml_w, index.xrefs); - stats.xref_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - encode_struct_field_attrs(&ecx, rbml_w, krate); - - stats.total_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - - if ecx.tcx.sess.meta_stats() { - for e in rbml_w.writer.get_ref() { - if *e == 0 { - stats.zero_bytes += 1; + if self.tcx.sess.meta_stats() { + let mut zero_bytes = 0; + for e in self.opaque.cursor.get_ref() { + if *e == 0 { + zero_bytes += 1; + } } + + println!("metadata stats:"); + println!(" dep bytes: {}", dep_bytes); + println!(" lang item bytes: {}", lang_item_bytes); + println!(" native bytes: {}", native_lib_bytes); + println!(" codemap bytes: {}", codemap_bytes); + println!(" macro def bytes: {}", macro_defs_bytes); + println!(" impl bytes: {}", impl_bytes); + println!(" reachable bytes: {}", reachable_bytes); + println!(" item bytes: {}", item_bytes); + println!(" index bytes: {}", index_bytes); + println!(" zero bytes: {}", zero_bytes); + println!(" total bytes: {}", total_bytes); } - println!("metadata stats:"); - println!(" attribute bytes: {}", stats.attr_bytes); - println!(" dep bytes: {}", stats.dep_bytes); - println!(" lang item bytes: {}", stats.lang_item_bytes); - println!(" native bytes: {}", stats.native_lib_bytes); - println!("plugin registrar bytes: {}", stats.plugin_registrar_fn_bytes); - println!(" codemap bytes: {}", stats.codemap_bytes); - println!(" macro def bytes: {}", stats.macro_defs_bytes); - println!(" impl bytes: {}", stats.impl_bytes); - println!(" misc bytes: {}", stats.misc_bytes); - println!(" item bytes: {}", stats.item_bytes); - println!(" index bytes: {}", stats.index_bytes); - println!(" xref bytes: {}", stats.xref_bytes); - println!(" zero bytes: {}", stats.zero_bytes); - println!(" total bytes: {}", stats.total_bytes); + root } } -// Get the encoded string for a type -pub fn encoded_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - t: Ty<'tcx>, - def_id_to_string: for<'b> fn(TyCtxt<'b, 'tcx, 'tcx>, DefId) -> String) - -> Vec { - let mut wr = Cursor::new(Vec::new()); - tyencode::enc_ty(&mut wr, &tyencode::ctxt { - diag: tcx.sess.diagnostic(), - ds: def_id_to_string, +// NOTE(eddyb) The following comment was preserved for posterity, even +// though it's no longer relevant as EBML (which uses nested & tagged +// "documents") was replaced with a scheme that can't go out of bounds. +// +// And here we run into yet another obscure archive bug: in which metadata +// loaded from archives may have trailing garbage bytes. Awhile back one of +// our tests was failing sporadically on the OSX 64-bit builders (both nopt +// and opt) by having ebml generate an out-of-bounds panic when looking at +// metadata. +// +// Upon investigation it turned out that the metadata file inside of an rlib +// (and ar archive) was being corrupted. Some compilations would generate a +// metadata file which would end in a few extra bytes, while other +// compilations would not have these extra bytes appended to the end. 
These +// extra bytes were interpreted by ebml as an extra tag, so they ended up +// being interpreted causing the out-of-bounds. +// +// The root cause of why these extra bytes were appearing was never +// discovered, and in the meantime the solution we're employing is to insert +// the length of the metadata to the start of the metadata. Later on this +// will allow us to slice the metadata to the precise length that we just +// generated regardless of trailing bytes that end up in it. + +pub fn encode_metadata<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + cstore: &cstore::CStore, + reexports: &def::ExportMap, + link_meta: &LinkMeta, + reachable: &NodeSet, + mir_map: &MirMap<'tcx>) -> Vec { + let mut cursor = Cursor::new(vec![]); + cursor.write_all(METADATA_HEADER).unwrap(); + + // Will be filed with the root position after encoding everything. + cursor.write_all(&[0, 0, 0, 0]).unwrap(); + + let root = EncodeContext { + opaque: opaque::Encoder::new(&mut cursor), tcx: tcx, - abbrevs: &RefCell::new(FnvHashMap()) - }, t); - wr.into_inner() + reexports: reexports, + link_meta: link_meta, + cstore: cstore, + reachable: reachable, + mir_map: mir_map, + lazy_state: LazyState::NoNode, + type_shorthands: Default::default(), + predicate_shorthands: Default::default() + }.encode_crate_root(); + let mut result = cursor.into_inner(); + + // Encode the root position. + let header = METADATA_HEADER.len(); + let pos = root.position; + result[header + 0] = (pos >> 24) as u8; + result[header + 1] = (pos >> 16) as u8; + result[header + 2] = (pos >> 8) as u8; + result[header + 3] = (pos >> 0) as u8; + + result } diff --git a/src/librustc_metadata/index.rs b/src/librustc_metadata/index.rs index b850073462..ef83251f51 100644 --- a/src/librustc_metadata/index.rs +++ b/src/librustc_metadata/index.rs @@ -8,53 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use schema::*; + use rustc::hir::def_id::{DefId, DefIndex}; -use rbml; use std::io::{Cursor, Write}; use std::slice; use std::u32; -/// As part of the metadata, we generate an index that stores, for -/// each DefIndex, the position of the corresponding RBML document (if -/// any). This is just a big `[u32]` slice, where an entry of -/// `u32::MAX` indicates that there is no RBML document. This little -/// struct just stores the offsets within the metadata of the start -/// and end of this slice. These are actually part of an RBML -/// document, but for looking things up in the metadata, we just -/// discard the RBML positioning and jump directly to the data. -pub struct Index { - data_start: usize, - data_end: usize, -} - -impl Index { - /// Given the RBML doc representing the index, save the offests - /// for later. - pub fn from_rbml(index: rbml::Doc) -> Index { - Index { data_start: index.start, data_end: index.end } - } - - /// Given the metadata, extract out the offset of a particular - /// DefIndex (if any). - #[inline(never)] - pub fn lookup_item(&self, bytes: &[u8], def_index: DefIndex) -> Option { - let words = bytes_to_words(&bytes[self.data_start..self.data_end]); - let index = def_index.as_usize(); - - debug!("lookup_item: index={:?} words.len={:?}", - index, words.len()); - - let position = u32::from_be(words[index]); - if position == u32::MAX { - debug!("lookup_item: position=u32::MAX"); - None - } else { - debug!("lookup_item: position={:?}", position); - Some(position) - } - } -} - /// While we are generating the metadata, we also track the position /// of each DefIndex. 
It is not required that all definitions appear /// in the metadata, nor that they are serialized in order, and @@ -62,84 +22,81 @@ impl Index { /// `u32::MAX`. Whenever an index is visited, we fill in the /// appropriate spot by calling `record_position`. We should never /// visit the same index twice. -pub struct IndexData { +pub struct Index { positions: Vec, } -impl IndexData { - pub fn new(max_index: usize) -> IndexData { - IndexData { +impl Index { + pub fn new(max_index: usize) -> Index { + Index { positions: vec![u32::MAX; max_index] } } - pub fn record(&mut self, def_id: DefId, position: u64) { + pub fn record(&mut self, def_id: DefId, entry: Lazy) { assert!(def_id.is_local()); - self.record_index(def_id.index, position); + self.record_index(def_id.index, entry); } - pub fn record_index(&mut self, item: DefIndex, position: u64) { + pub fn record_index(&mut self, item: DefIndex, entry: Lazy) { let item = item.as_usize(); - assert!(position < (u32::MAX as u64)); - let position = position as u32; + assert!(entry.position < (u32::MAX as usize)); + let position = entry.position as u32; assert!(self.positions[item] == u32::MAX, "recorded position for item {:?} twice, first at {:?} and now at {:?}", item, self.positions[item], position); - self.positions[item] = position; + self.positions[item] = position.to_le(); } - pub fn write_index(&self, buf: &mut Cursor>) { - for &position in &self.positions { - write_be_u32(buf, position); - } + pub fn write_index(&self, buf: &mut Cursor>) -> LazySeq { + let pos = buf.position(); + buf.write_all(words_to_bytes(&self.positions)).unwrap(); + LazySeq::with_position_and_length(pos as usize, self.positions.len()) } } -/// A dense index with integer keys. Different API from IndexData (should -/// these be merged?) -pub struct DenseIndex { - start: usize, - end: usize -} +impl<'tcx> LazySeq { + /// Given the metadata, extract out the offset of a particular + /// DefIndex (if any). 
+ #[inline(never)] + pub fn lookup(&self, bytes: &[u8], def_index: DefIndex) -> Option>> { + let words = &bytes_to_words(&bytes[self.position..])[..self.len]; + let index = def_index.as_usize(); -impl DenseIndex { - pub fn lookup(&self, buf: &[u8], ix: u32) -> Option { - let data = bytes_to_words(&buf[self.start..self.end]); - data.get(ix as usize).map(|d| u32::from_be(*d)) - } - pub fn from_buf(buf: &[u8], start: usize, end: usize) -> Self { - assert!((end-start)%4 == 0 && start <= end && end <= buf.len()); - DenseIndex { - start: start, - end: end + debug!("Index::lookup: index={:?} words.len={:?}", + index, words.len()); + + let position = u32::from_le(words[index]); + if position == u32::MAX { + debug!("Index::lookup: position=u32::MAX"); + None + } else { + debug!("Index::lookup: position={:?}", position); + Some(Lazy::with_position(position as usize)) } } -} - -pub fn write_dense_index(entries: Vec, buf: &mut Cursor>) { - let elen = entries.len(); - assert!(elen < u32::MAX as usize); - for entry in entries { - write_be_u32(buf, entry); + pub fn iter_enumerated<'a>(&self, bytes: &'a [u8]) + -> impl Iterator>)> + 'a { + let words = &bytes_to_words(&bytes[self.position..])[..self.len]; + words.iter().enumerate().filter_map(|(index, &position)| { + if position == u32::MAX { + None + } else { + let position = u32::from_le(position) as usize; + Some((DefIndex::new(index), Lazy::with_position(position))) + } + }) } - - info!("write_dense_index: {} entries", elen); } -fn write_be_u32(w: &mut W, u: u32) { - let _ = w.write_all(&[ - (u >> 24) as u8, - (u >> 16) as u8, - (u >> 8) as u8, - (u >> 0) as u8, - ]); +fn bytes_to_words(b: &[u8]) -> &[u32] { + unsafe { slice::from_raw_parts(b.as_ptr() as *const u32, b.len() / 4) } } -fn bytes_to_words(b: &[u8]) -> &[u32] { - assert!(b.len() % 4 == 0); - unsafe { slice::from_raw_parts(b.as_ptr() as *const u32, b.len()/4) } +fn words_to_bytes(w: &[u32]) -> &[u8] { + unsafe { slice::from_raw_parts(w.as_ptr() as *const u8, w.len() * 4) } } diff --git a/src/librustc_metadata/index_builder.rs b/src/librustc_metadata/index_builder.rs new file mode 100644 index 0000000000..aeb6f63252 --- /dev/null +++ b/src/librustc_metadata/index_builder.rs @@ -0,0 +1,224 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Builder types for generating the "item data" section of the +//! metadata. This section winds up looking like this: +//! +//! ``` +//! // big list of item-like things... +//! // ...for most def-ids, there is an entry. +//! +//! +//! ``` +//! +//! As we generate this listing, we collect the offset of each +//! `data_item` entry and store it in an index. Then, when we load the +//! metadata, we can skip right to the metadata for a particular item. +//! +//! In addition to the offset, we need to track the data that was used +//! to generate the contents of each `data_item`. This is so that we +//! can figure out which HIR nodes contributed to that data for +//! incremental compilation purposes. +//! +//! The `IndexBuilder` facilitates both of these. It is created +//! with an `EncodingContext` (`ecx`), which it encapsulates. +//! It has one main method, `record()`. You invoke `record` +//! 
like so to create a new `data_item` element in the list: +//! +//! ``` +//! index.record(some_def_id, callback_fn, data) +//! ``` +//! +//! What record will do is to (a) record the current offset, (b) emit +//! the `common::data_item` tag, and then call `callback_fn` with the +//! given data as well as the `EncodingContext`. Once `callback_fn` +//! returns, the `common::data_item` tag will be closed. +//! +//! `EncodingContext` does not offer the `record` method, so that we +//! can ensure that `common::data_item` elements are never nested. +//! +//! In addition, while the `callback_fn` is executing, we will push a +//! task `MetaData(some_def_id)`, which can then observe the +//! reads/writes that occur in the task. For this reason, the `data` +//! argument that is given to the `callback_fn` must implement the +//! trait `DepGraphRead`, which indicates how to register reads on the +//! data in this new task (note that many types of data, such as +//! `DefId`, do not currently require any reads to be registered, +//! since they are not derived from a HIR node). This is also why we +//! give a callback fn, rather than taking a closure: it allows us to +//! easily control precisely what data is given to that fn. + +use encoder::EncodeContext; +use index::Index; +use schema::*; + +use rustc::dep_graph::DepNode; +use rustc::hir; +use rustc::hir::def_id::DefId; +use rustc::ty::TyCtxt; +use syntax::ast; + +use std::ops::{Deref, DerefMut}; + +/// Builder that can encode new items, adding them into the index. +/// Item encoding cannot be nested. +pub struct IndexBuilder<'a, 'b: 'a, 'tcx: 'b> { + items: Index, + pub ecx: &'a mut EncodeContext<'b, 'tcx>, +} + +impl<'a, 'b, 'tcx> Deref for IndexBuilder<'a, 'b, 'tcx> { + type Target = EncodeContext<'b, 'tcx>; + fn deref(&self) -> &Self::Target { + self.ecx + } +} + +impl<'a, 'b, 'tcx> DerefMut for IndexBuilder<'a, 'b, 'tcx> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.ecx + } +} + +impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { + pub fn new(ecx: &'a mut EncodeContext<'b, 'tcx>) -> Self { + IndexBuilder { + items: Index::new(ecx.tcx.map.num_local_def_ids()), + ecx: ecx, + } + } + + /// Emit the data for a def-id to the metadata. The function to + /// emit the data is `op`, and it will be given `data` as + /// arguments. This `record` function will call `op` to generate + /// the `Entry` (which may point to other encoded information) + /// and will then record the `Lazy` for use in the index. + /// + /// In addition, it will setup a dep-graph task to track what data + /// `op` accesses to generate the metadata, which is later used by + /// incremental compilation to compute a hash for the metadata and + /// track changes. + /// + /// The reason that `op` is a function pointer, and not a closure, + /// is that we want to be able to completely track all data it has + /// access to, so that we can be sure that `DATA: DepGraphRead` + /// holds, and that it is therefore not gaining "secret" access to + /// bits of HIR or other state that would not be trackd by the + /// content system. + pub fn record(&mut self, + id: DefId, + op: fn(&mut EncodeContext<'b, 'tcx>, DATA) -> Entry<'tcx>, + data: DATA) + where DATA: DepGraphRead + { + let _task = self.tcx.dep_graph.in_task(DepNode::MetaData(id)); + data.read(self.tcx); + let entry = op(&mut self.ecx, data); + self.items.record(id, self.ecx.lazy(&entry)); + } + + pub fn into_items(self) -> Index { + self.items + } +} + +/// Trait used for data that can be passed from outside a dep-graph +/// task. 
The data must either be of some safe type, such as a +/// `DefId` index, or implement the `read` method so that it can add +/// a read of whatever dep-graph nodes are appropriate. +pub trait DepGraphRead { + fn read(&self, tcx: TyCtxt); +} + +impl DepGraphRead for DefId { + fn read(&self, _tcx: TyCtxt) { } +} + +impl DepGraphRead for ast::NodeId { + fn read(&self, _tcx: TyCtxt) { } +} + +impl DepGraphRead for Option + where T: DepGraphRead +{ + fn read(&self, tcx: TyCtxt) { + match *self { + Some(ref v) => v.read(tcx), + None => (), + } + } +} + +impl DepGraphRead for [T] + where T: DepGraphRead +{ + fn read(&self, tcx: TyCtxt) { + for i in self { + i.read(tcx); + } + } +} + +macro_rules! read_tuple { + ($($name:ident),*) => { + impl<$($name),*> DepGraphRead for ($($name),*) + where $($name: DepGraphRead),* + { + #[allow(non_snake_case)] + fn read(&self, tcx: TyCtxt) { + let &($(ref $name),*) = self; + $($name.read(tcx);)* + } + } + } +} +read_tuple!(A,B); +read_tuple!(A,B,C); + +macro_rules! read_hir { + ($t:ty) => { + impl<'tcx> DepGraphRead for &'tcx $t { + fn read(&self, tcx: TyCtxt) { + tcx.map.read(self.id); + } + } + } +} +read_hir!(hir::Item); +read_hir!(hir::ImplItem); +read_hir!(hir::TraitItem); +read_hir!(hir::ForeignItem); + +/// Leaks access to a value of type T without any tracking. This is +/// suitable for ambiguous types like `usize`, which *could* represent +/// tracked data (e.g., if you read it out of a HIR node) or might not +/// (e.g., if it's an index). Adding in an `Untracked` is an +/// assertion, essentially, that the data does not need to be tracked +/// (or that read edges will be added by some other way). +/// +/// A good idea is to add to each use of `Untracked` an explanation of +/// why this value is ok. +pub struct Untracked(pub T); + +impl DepGraphRead for Untracked { + fn read(&self, _tcx: TyCtxt) { } +} + +/// Newtype that can be used to package up misc data extracted from a +/// HIR node that doesn't carry its own id. This will allow an +/// arbitrary `T` to be passed in, but register a read on the given +/// node-id. 
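As the `record()` docs above spell out, the callback is a plain `fn` pointer rather than a closure precisely so that everything it reads has to arrive through the `DATA` argument, where it can be observed. A self-contained toy of that shape (all names here are illustrative, not part of the patch):

```rust
// Toy model of the "fn pointer + tracked data" pattern used by
// IndexBuilder::record. Because `op` is a fn pointer, it cannot capture
// untracked state from its environment; whatever it needs is passed as
// `data`, which we get to observe via `TrackedRead` first.
trait TrackedRead {
    fn read(&self, log: &mut Vec<String>);
}

impl TrackedRead for u32 {
    fn read(&self, log: &mut Vec<String>) {
        log.push(format!("read item #{}", self));
    }
}

struct ToyBuilder {
    log: Vec<String>,
    entries: Vec<String>,
}

impl ToyBuilder {
    fn record<D: TrackedRead>(&mut self, op: fn(D) -> String, data: D) {
        data.read(&mut self.log); // register what `op` is about to look at
        let entry = op(data);     // `op` can only use `data`
        self.entries.push(entry);
    }
}

fn encode_item(id: u32) -> String {
    format!("entry for item #{}", id)
}

fn main() {
    let mut b = ToyBuilder { log: Vec::new(), entries: Vec::new() };
    b.record(encode_item, 7);
    assert_eq!(b.entries, vec!["entry for item #7".to_string()]);
    assert_eq!(b.log, vec!["read item #7".to_string()]);
}
```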
+pub struct FromId(pub ast::NodeId, pub T); + +impl DepGraphRead for FromId { + fn read(&self, tcx: TyCtxt) { + tcx.map.read(self.0); + } +} diff --git a/src/librustc_metadata/lib.rs b/src/librustc_metadata/lib.rs index cd92493e3d..50e1cbf565 100644 --- a/src/librustc_metadata/lib.rs +++ b/src/librustc_metadata/lib.rs @@ -17,48 +17,48 @@ html_root_url = "https://doc.rust-lang.org/nightly/")] #![cfg_attr(not(stage0), deny(warnings))] +#![feature(conservative_impl_trait)] +#![feature(core_intrinsics)] #![feature(box_patterns)] -#![feature(enumset)] +#![feature(dotdot_in_tuple_patterns)] +#![cfg_attr(stage0, feature(question_mark))] #![feature(quote)] #![feature(rustc_diagnostic_macros)] +#![feature(rustc_macro_lib)] +#![feature(rustc_macro_internals)] #![feature(rustc_private)] +#![feature(specialization)] #![feature(staged_api)] -#![feature(question_mark)] #[macro_use] extern crate log; #[macro_use] extern crate syntax; -#[macro_use] #[no_link] extern crate rustc_bitflags; extern crate syntax_pos; extern crate flate; -extern crate rbml; extern crate serialize as rustc_serialize; // used by deriving extern crate rustc_errors as errors; +extern crate syntax_ext; #[macro_use] extern crate rustc; extern crate rustc_data_structures; extern crate rustc_back; extern crate rustc_llvm; +extern crate rustc_macro; extern crate rustc_const_math; -pub use rustc::middle; +mod diagnostics; -#[macro_use] -mod macros; - -pub mod diagnostics; +mod astencode; +mod index_builder; +mod index; +mod encoder; +mod decoder; +mod csearch; +mod schema; -pub mod astencode; -pub mod common; -pub mod def_key; -pub mod tyencode; -pub mod tydecode; -pub mod encoder; -pub mod decoder; pub mod creader; -pub mod csearch; pub mod cstore; -pub mod index; pub mod loader; pub mod macro_import; -pub mod tls_context; + +__build_diagnostic_array! { librustc_metadata, DIAGNOSTICS } diff --git a/src/librustc_metadata/loader.rs b/src/librustc_metadata/loader.rs index 2345cd9a92..fc94cec916 100644 --- a/src/librustc_metadata/loader.rs +++ b/src/librustc_metadata/loader.rs @@ -212,15 +212,15 @@ //! no means all of the necessary details. Take a look at the rest of //! metadata::loader or metadata::creader for all the juicy details! 
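For orientation on the loader changes below: the loader compares the first bytes of the metadata section against `METADATA_HEADER` (defined in `schema.rs`, later in this patch), and `encode_metadata` (earlier in this patch) follows that 12-byte header with the `CrateRoot` position as four big-endian bytes. A minimal std-only sketch of reading that prefix back, with a hypothetical helper that is not part of the patch:

```rust
// Hypothetical reader for the prefix written by `encode_metadata`:
// the 12-byte METADATA_HEADER followed by a big-endian u32 giving the
// absolute position of the CrateRoot within the metadata blob.
const METADATA_HEADER: &'static [u8] = &[
    0, 0, 0, 0,            // read as a zero length by old compilers
    b'r', b'u', b's', b't',
    0, 0, 0, 3,             // METADATA_VERSION
];

fn root_position(metadata: &[u8]) -> Option<usize> {
    if metadata.len() < METADATA_HEADER.len() + 4 {
        return None;
    }
    if &metadata[..METADATA_HEADER.len()] != METADATA_HEADER {
        return None; // incompatible metadata version
    }
    let pos = &metadata[METADATA_HEADER.len()..][..4];
    Some(((pos[0] as usize) << 24) |
         ((pos[1] as usize) << 16) |
         ((pos[2] as usize) << 8) |
         (pos[3] as usize))
}

fn main() {
    let mut blob = METADATA_HEADER.to_vec();
    blob.extend_from_slice(&[0, 0, 1, 2]); // root at offset 258
    blob.resize(1024, 0);
    assert_eq!(root_position(&blob), Some(258));
}
```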
-use cstore::{MetadataBlob, MetadataVec, MetadataArchive}; -use common::{metadata_encoding_version, rustc_version}; -use decoder; +use cstore::MetadataBlob; +use schema::{METADATA_HEADER, RUSTC_VERSION}; use rustc::hir::svh::Svh; use rustc::session::Session; use rustc::session::filesearch::{FileSearch, FileMatches, FileDoesntMatch}; use rustc::session::search_paths::PathKind; use rustc::util::common; +use rustc::util::nodemap::FnvHashMap; use rustc_llvm as llvm; use rustc_llvm::{False, ObjectFile, mk_section_iter}; @@ -230,7 +230,6 @@ use syntax_pos::Span; use rustc_back::target::Target; use std::cmp; -use std::collections::HashMap; use std::fmt; use std::fs; use std::io; @@ -342,9 +341,11 @@ impl<'a> Context<'a> { "found crate `{}` compiled by an incompatible version of rustc{}", self.ident, add) } else { - struct_span_err!(self.sess, self.span, E0463, - "can't find crate for `{}`{}", - self.ident, add) + let mut err = struct_span_err!(self.sess, self.span, E0463, + "can't find crate for `{}`{}", + self.ident, add); + err.span_label(self.span, &format!("can't find crate")); + err }; if !self.rejected_via_triple.is_empty() { @@ -381,7 +382,7 @@ impl<'a> Context<'a> { } if !self.rejected_via_version.is_empty() { err.help(&format!("please recompile that crate using this compiler ({})", - rustc_version())); + RUSTC_VERSION)); let mismatches = self.rejected_via_version.iter(); for (i, &CrateMismatch { ref path, ref got }) in mismatches.enumerate() { err.note(&format!("crate `{}` path #{}: {} compiled by {:?}", @@ -413,7 +414,7 @@ impl<'a> Context<'a> { let rlib_prefix = format!("lib{}", self.crate_name); let staticlib_prefix = format!("{}{}", staticpair.0, self.crate_name); - let mut candidates = HashMap::new(); + let mut candidates = FnvHashMap(); let mut staticlibs = vec!(); // First, find all possible candidate rlibs and dylibs purely based on @@ -456,7 +457,7 @@ impl<'a> Context<'a> { let hash_str = hash.to_string(); let slot = candidates.entry(hash_str) - .or_insert_with(|| (HashMap::new(), HashMap::new())); + .or_insert_with(|| (FnvHashMap(), FnvHashMap())); let (ref mut rlibs, ref mut dylibs) = *slot; fs::canonicalize(path).map(|p| { if rlib { @@ -477,7 +478,7 @@ impl<'a> Context<'a> { // A Library candidate is created if the metadata for the set of // libraries corresponds to the crate id and hash criteria that this // search is being performed for. - let mut libraries = HashMap::new(); + let mut libraries = FnvHashMap(); for (_hash, (rlibs, dylibs)) in candidates { let mut slot = None; let rlib = self.extract_one(rlibs, CrateFlavor::Rlib, &mut slot); @@ -509,9 +510,7 @@ impl<'a> Context<'a> { if let Some((ref p, _)) = lib.rlib { err.note(&format!("path: {}", p.display())); } - let data = lib.metadata.as_slice(); - let name = decoder::get_crate_name(data); - note_crate_name(&mut err, &name); + note_crate_name(&mut err, &lib.metadata.get_root().name); } err.emit(); None @@ -527,7 +526,7 @@ impl<'a> Context<'a> { // read the metadata from it if `*slot` is `None`. If the metadata couldn't // be read, it is assumed that the file isn't a valid rust library (no // errors are emitted). 
- fn extract_one(&mut self, m: HashMap, flavor: CrateFlavor, + fn extract_one(&mut self, m: FnvHashMap, flavor: CrateFlavor, slot: &mut Option<(Svh, MetadataBlob)>) -> Option<(PathBuf, PathKind)> { let mut ret: Option<(PathBuf, PathKind)> = None; let mut error = 0; @@ -549,7 +548,7 @@ impl<'a> Context<'a> { info!("{} reading metadata from: {}", flavor, lib.display()); let (hash, metadata) = match get_metadata_section(self.target, flavor, &lib) { Ok(blob) => { - if let Some(h) = self.crate_matches(blob.as_slice(), &lib) { + if let Some(h) = self.crate_matches(&blob, &lib) { (h, blob) } else { info!("metadata mismatch"); @@ -596,45 +595,38 @@ impl<'a> Context<'a> { } } - fn crate_matches(&mut self, crate_data: &[u8], libpath: &Path) -> Option { - let crate_rustc_version = decoder::crate_rustc_version(crate_data); - if crate_rustc_version != Some(rustc_version()) { - let message = crate_rustc_version.unwrap_or(format!("an unknown compiler")); - info!("Rejecting via version: expected {} got {}", rustc_version(), message); + fn crate_matches(&mut self, metadata: &MetadataBlob, libpath: &Path) -> Option { + let root = metadata.get_root(); + if root.rustc_version != RUSTC_VERSION { + info!("Rejecting via version: expected {} got {}", + RUSTC_VERSION, root.rustc_version); self.rejected_via_version.push(CrateMismatch { path: libpath.to_path_buf(), - got: message + got: root.rustc_version }); return None; } if self.should_match_name { - match decoder::maybe_get_crate_name(crate_data) { - Some(ref name) if self.crate_name == *name => {} - _ => { info!("Rejecting via crate name"); return None } + if self.crate_name != root.name { + info!("Rejecting via crate name"); return None; } } - let hash = match decoder::maybe_get_crate_hash(crate_data) { - None => { info!("Rejecting via lack of crate hash"); return None; } - Some(h) => h, - }; - let triple = match decoder::get_crate_triple(crate_data) { - None => { debug!("triple not present"); return None } - Some(t) => t, - }; - if triple != self.triple { - info!("Rejecting via crate triple: expected {} got {}", self.triple, triple); + if root.triple != self.triple { + info!("Rejecting via crate triple: expected {} got {}", + self.triple, root.triple); self.rejected_via_triple.push(CrateMismatch { path: libpath.to_path_buf(), - got: triple.to_string() + got: root.triple }); return None; } if let Some(myhash) = self.hash { - if *myhash != hash { - info!("Rejecting via hash: expected {} got {}", *myhash, hash); + if *myhash != root.hash { + info!("Rejecting via hash: expected {} got {}", + *myhash, root.hash); self.rejected_via_hash.push(CrateMismatch { path: libpath.to_path_buf(), got: myhash.to_string() @@ -643,7 +635,7 @@ impl<'a> Context<'a> { } } - Some(hash) + Some(root.hash) } @@ -669,8 +661,8 @@ impl<'a> Context<'a> { // rlibs/dylibs. 
let sess = self.sess; let dylibname = self.dylibname(); - let mut rlibs = HashMap::new(); - let mut dylibs = HashMap::new(); + let mut rlibs = FnvHashMap(); + let mut dylibs = FnvHashMap(); { let locs = locs.map(|l| PathBuf::from(l)).filter(|loc| { if !loc.exists() { @@ -764,11 +756,7 @@ impl ArchiveMetadata { fn verify_decompressed_encoding_version(blob: &MetadataBlob, filename: &Path) -> Result<(), String> { - let data = blob.as_slice_raw(); - if data.len() < 4+metadata_encoding_version.len() || - !<[u8]>::eq(&data[..4], &[0, 0, 0, 0]) || - &data[4..4+metadata_encoding_version.len()] != metadata_encoding_version - { + if !blob.is_compatible() { Err((format!("incompatible metadata version found: '{}'", filename.display()))) } else { @@ -803,11 +791,11 @@ fn get_metadata_section_imp(target: &Target, flavor: CrateFlavor, filename: &Pat filename.display())); } }; - return match ArchiveMetadata::new(archive).map(|ar| MetadataArchive(ar)) { + return match ArchiveMetadata::new(archive).map(|ar| MetadataBlob::Archive(ar)) { None => Err(format!("failed to read rlib metadata: '{}'", filename.display())), Some(blob) => { - try!(verify_decompressed_encoding_version(&blob, filename)); + verify_decompressed_encoding_version(&blob, filename)?; Ok(blob) } }; @@ -838,12 +826,12 @@ fn get_metadata_section_imp(target: &Target, flavor: CrateFlavor, filename: &Pat let cbuf = llvm::LLVMGetSectionContents(si.llsi); let csz = llvm::LLVMGetSectionSize(si.llsi) as usize; let cvbuf: *const u8 = cbuf as *const u8; - let vlen = metadata_encoding_version.len(); + let vlen = METADATA_HEADER.len(); debug!("checking {} bytes of metadata-version stamp", vlen); let minsz = cmp::min(vlen, csz); let buf0 = slice::from_raw_parts(cvbuf, minsz); - let version_ok = buf0 == metadata_encoding_version; + let version_ok = buf0 == METADATA_HEADER; if !version_ok { return Err((format!("incompatible metadata version found: '{}'", filename.display()))); @@ -855,8 +843,8 @@ fn get_metadata_section_imp(target: &Target, flavor: CrateFlavor, filename: &Pat let bytes = slice::from_raw_parts(cvbuf1, csz - vlen); match flate::inflate_bytes(bytes) { Ok(inflated) => { - let blob = MetadataVec(inflated); - try!(verify_decompressed_encoding_version(&blob, filename)); + let blob = MetadataBlob::Inflated(inflated); + verify_decompressed_encoding_version(&blob, filename)?; return Ok(blob); } Err(_) => {} @@ -900,7 +888,7 @@ pub fn list_file_metadata(target: &Target, path: &Path, let filename = path.file_name().unwrap().to_str().unwrap(); let flavor = if filename.ends_with(".rlib") { CrateFlavor::Rlib } else { CrateFlavor::Dylib }; match get_metadata_section(target, flavor, path) { - Ok(bytes) => decoder::list_crate_metadata(bytes.as_slice(), out), + Ok(metadata) => metadata.list_crate_metadata(out), Err(msg) => { write!(out, "{}\n", msg) } diff --git a/src/librustc_metadata/macro_import.rs b/src/librustc_metadata/macro_import.rs index b2a2dcf90f..2ff7a6c41b 100644 --- a/src/librustc_metadata/macro_import.rs +++ b/src/librustc_metadata/macro_import.rs @@ -10,48 +10,44 @@ //! Used by `rustc` when loading a crate with exported macros. 
-use creader::CrateReader; -use cstore::CStore; +use std::collections::HashSet; +use std::rc::Rc; +use std::env; +use std::mem; -use rustc::session::Session; +use creader::{CrateLoader, Macros}; -use std::collections::{HashSet, HashMap}; -use syntax::parse::token; +use rustc::hir::def_id::DefIndex; +use rustc::middle::cstore::LoadedMacro; +use rustc::session::Session; +use rustc::util::nodemap::FnvHashMap; +use rustc_back::dynamic_lib::DynamicLibrary; +use rustc_macro::TokenStream; +use rustc_macro::__internal::Registry; use syntax::ast; use syntax::attr; -use syntax::attr::AttrMetaMethods; -use syntax::ext; +use syntax::parse::token; +use syntax_ext::deriving::custom::CustomDerive; use syntax_pos::Span; -pub struct MacroLoader<'a> { - sess: &'a Session, - reader: CrateReader<'a>, -} - -impl<'a> MacroLoader<'a> { - pub fn new(sess: &'a Session, - cstore: &'a CStore, - crate_name: &str, - crate_config: ast::CrateConfig) - -> MacroLoader<'a> { - MacroLoader { - sess: sess, - reader: CrateReader::new(sess, cstore, crate_name, crate_config), - } - } -} - pub fn call_bad_macro_reexport(a: &Session, b: Span) { span_err!(a, b, E0467, "bad macro reexport"); } -pub type MacroSelection = HashMap; +pub type MacroSelection = FnvHashMap; + +pub fn load_macros(loader: &mut CrateLoader, extern_crate: &ast::Item, allows_macros: bool) + -> Vec { + loader.load_crate(extern_crate, allows_macros) +} -impl<'a> ext::base::MacroLoader for MacroLoader<'a> { - fn load_crate(&mut self, extern_crate: &ast::Item, allows_macros: bool) -> Vec { +impl<'a> CrateLoader<'a> { + fn load_crate(&mut self, + extern_crate: &ast::Item, + allows_macros: bool) -> Vec { // Parse the attributes relating to macros. - let mut import = Some(HashMap::new()); // None => load all - let mut reexport = HashMap::new(); + let mut import = Some(FnvHashMap()); // None => load all + let mut reexport = FnvHashMap(); for attr in &extern_crate.attrs { let mut used = true; @@ -64,8 +60,8 @@ impl<'a> ext::base::MacroLoader for MacroLoader<'a> { } if let (Some(sel), Some(names)) = (import.as_mut(), names) { for attr in names { - if attr.is_word() { - sel.insert(attr.name().clone(), attr.span()); + if let Some(word) = attr.word() { + sel.insert(word.name().clone(), attr.span()); } else { span_err!(self.sess, attr.span(), E0466, "bad macro import"); } @@ -82,8 +78,8 @@ impl<'a> ext::base::MacroLoader for MacroLoader<'a> { }; for attr in names { - if attr.is_word() { - reexport.insert(attr.name().clone(), attr.span()); + if let Some(word) = attr.word() { + reexport.insert(word.name().clone(), attr.span()); } else { call_bad_macro_reexport(self.sess, attr.span()); } @@ -98,15 +94,13 @@ impl<'a> ext::base::MacroLoader for MacroLoader<'a> { self.load_macros(extern_crate, allows_macros, import, reexport) } -} -impl<'a> MacroLoader<'a> { fn load_macros<'b>(&mut self, vi: &ast::Item, allows_macros: bool, import: Option, reexport: MacroSelection) - -> Vec { + -> Vec { if let Some(sel) = import.as_ref() { if sel.is_empty() && reexport.is_empty() { return Vec::new(); @@ -119,10 +113,11 @@ impl<'a> MacroLoader<'a> { return Vec::new(); } - let mut macros = Vec::new(); + let mut macros = self.creader.read_macros(vi); + let mut ret = Vec::new(); let mut seen = HashSet::new(); - for mut def in self.reader.read_exported_macros(vi) { + for mut def in macros.macro_rules.drain(..) 
{ let name = def.ident.name.as_str(); def.use_locally = match import.as_ref() { @@ -133,10 +128,29 @@ impl<'a> MacroLoader<'a> { def.allow_internal_unstable = attr::contains_name(&def.attrs, "allow_internal_unstable"); debug!("load_macros: loaded: {:?}", def); - macros.push(def); + ret.push(LoadedMacro::Def(def)); seen.insert(name); } + if let Some(index) = macros.custom_derive_registrar { + // custom derive crates currently should not have any macro_rules! + // exported macros, enforced elsewhere + assert_eq!(ret.len(), 0); + + if import.is_some() { + self.sess.span_err(vi.span, "`rustc-macro` crates cannot be \ + selectively imported from, must \ + use `#[macro_use]`"); + } + + if reexport.len() > 0 { + self.sess.span_err(vi.span, "`rustc-macro` crates cannot be \ + reexported from"); + } + + self.load_derive_macros(vi.span, ¯os, index, &mut ret); + } + if let Some(sel) = import.as_ref() { for (name, span) in sel { if !seen.contains(&name) { @@ -153,6 +167,53 @@ impl<'a> MacroLoader<'a> { } } - macros + return ret + } + + /// Load the custom derive macros into the list of macros we're loading. + /// + /// Note that this is intentionally similar to how we load plugins today, + /// but also intentionally separate. Plugins are likely always going to be + /// implemented as dynamic libraries, but we have a possible future where + /// custom derive (and other macro-1.1 style features) are implemented via + /// executables and custom IPC. + fn load_derive_macros(&mut self, + span: Span, + macros: &Macros, + index: DefIndex, + ret: &mut Vec) { + // Make sure the path contains a / or the linker will search for it. + let path = macros.dylib.as_ref().unwrap(); + let path = env::current_dir().unwrap().join(path); + let lib = match DynamicLibrary::open(Some(&path)) { + Ok(lib) => lib, + Err(err) => self.sess.span_fatal(span, &err), + }; + + let sym = self.sess.generate_derive_registrar_symbol(¯os.svh, index); + let registrar = unsafe { + let sym = match lib.symbol(&sym) { + Ok(f) => f, + Err(err) => self.sess.span_fatal(span, &err), + }; + mem::transmute::<*mut u8, fn(&mut Registry)>(sym) + }; + + struct MyRegistrar<'a>(&'a mut Vec); + + impl<'a> Registry for MyRegistrar<'a> { + fn register_custom_derive(&mut self, + trait_name: &str, + expand: fn(TokenStream) -> TokenStream) { + let derive = Rc::new(CustomDerive::new(expand)); + self.0.push(LoadedMacro::CustomDerive(trait_name.to_string(), derive)); + } + } + + registrar(&mut MyRegistrar(ret)); + + // Intentionally leak the dynamic library. We can't ever unload it + // since the library can make things that will live arbitrarily long. + mem::forget(lib); } } diff --git a/src/librustc_metadata/macros.rs b/src/librustc_metadata/macros.rs deleted file mode 100644 index ed764ebd9f..0000000000 --- a/src/librustc_metadata/macros.rs +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -macro_rules! 
enum_from_u32 { - ($(#[$attr:meta])* pub enum $name:ident { - $($variant:ident = $e:expr,)* - }) => { - $(#[$attr])* - pub enum $name { - $($variant = $e),* - } - - impl $name { - pub fn from_u32(u: u32) -> Option<$name> { - $(if u == $name::$variant as u32 { - return Some($name::$variant) - })* - None - } - } - }; - ($(#[$attr:meta])* pub enum $name:ident { - $($variant:ident,)* - }) => { - $(#[$attr])* - pub enum $name { - $($variant,)* - } - - impl $name { - pub fn from_u32(u: u32) -> Option<$name> { - $(if u == $name::$variant as u32 { - return Some($name::$variant) - })* - None - } - } - } -} diff --git a/src/librustc_metadata/schema.rs b/src/librustc_metadata/schema.rs new file mode 100644 index 0000000000..f4d1e8e17f --- /dev/null +++ b/src/librustc_metadata/schema.rs @@ -0,0 +1,349 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use astencode; +use index; + +use rustc::hir; +use rustc::hir::def; +use rustc::hir::def_id::{DefIndex, DefId}; +use rustc::middle::cstore::{LinkagePreference, NativeLibraryKind}; +use rustc::middle::lang_items; +use rustc::mir; +use rustc::ty::{self, Ty}; +use rustc::session::config::PanicStrategy; + +use rustc_serialize as serialize; +use syntax::{ast, attr}; +use syntax_pos::{self, Span}; + +use std::marker::PhantomData; + +#[cfg(not(test))] +pub const RUSTC_VERSION: &'static str = concat!("rustc ", env!("CFG_VERSION")); + +#[cfg(test)] +pub const RUSTC_VERSION: &'static str = "rustc 0.0.0-unit-test"; + +/// Metadata encoding version. +/// NB: increment this if you change the format of metadata such that +/// the rustc version can't be found to compare with `RUSTC_VERSION`. +pub const METADATA_VERSION: u8 = 3; + +/// Metadata header which includes `METADATA_VERSION`. +/// To get older versions of rustc to ignore this metadata, +/// there are 4 zero bytes at the start, which are treated +/// as a length of 0 by old compilers. +/// +/// This header is followed by the position of the `CrateRoot`. +pub const METADATA_HEADER: &'static [u8; 12] = &[ + 0, 0, 0, 0, + b'r', b'u', b's', b't', + 0, 0, 0, METADATA_VERSION +]; + +/// The shorthand encoding uses an enum's variant index `usize` +/// and is offset by this value so it never matches a real variant. +/// This offset is also chosen so that the first byte is never < 0x80. +pub const SHORTHAND_OFFSET: usize = 0x80; + +/// A value of type T referred to by its absolute position +/// in the metadata, and which can be decoded lazily. +/// +/// Metadata is effective a tree, encoded in post-order, +/// and with the root's position written next to the header. +/// That means every single `Lazy` points to some previous +/// location in the metadata and is part of a larger node. +/// +/// The first `Lazy` in a node is encoded as the backwards +/// distance from the position where the containing node +/// starts and where the `Lazy` points to, while the rest +/// use the forward distance from the previous `Lazy`. +/// Distances start at 1, as 0-byte nodes are invalid. +/// Also invalid are nodes being referred in a different +/// order than they were encoded in. 
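The comment above describes how `Lazy` offsets are stored as relative distances: the first `Lazy` in a node as the backwards distance from the node's start, later ones as the forward distance from the previous `Lazy`, with distances never below 1. A self-contained toy round-trip of that scheme exactly as described (the real encoder's bookkeeping, e.g. how minimum encoded sizes feed into the `Previous` state, may differ in detail):

```rust
// Toy round-trip of the relative-offset scheme described above, using
// the same three states as LazyState. Positions are absolute offsets
// into the metadata; since nodes are encoded in post-order, every Lazy
// points backwards from the node that contains it.
#[derive(Copy, Clone)]
enum State {
    NoNode,
    NodeStart(usize),
    Previous(usize),
}

// Turn an absolute position into the distance that would be stored.
fn encode_distance(state: &mut State, position: usize) -> usize {
    let distance = match *state {
        State::NoNode => panic!("Lazy outside of a node"),
        State::NodeStart(start) => start - position, // backwards
        State::Previous(last) => position - last,    // forwards
    };
    assert!(distance >= 1, "0-byte nodes are invalid");
    *state = State::Previous(position);
    distance
}

// Recover the absolute position from a stored distance.
fn decode_distance(state: &mut State, distance: usize) -> usize {
    let position = match *state {
        State::NoNode => panic!("Lazy outside of a node"),
        State::NodeStart(start) => start - distance,
        State::Previous(last) => last + distance,
    };
    *state = State::Previous(position);
    position
}

fn main() {
    let node_start = 100;
    let positions = [40, 55, 70]; // where three Lazy values point
    let mut enc = State::NodeStart(node_start);
    let distances: Vec<usize> =
        positions.iter().map(|&p| encode_distance(&mut enc, p)).collect();
    let mut dec = State::NodeStart(node_start);
    let decoded: Vec<usize> =
        distances.iter().map(|&d| decode_distance(&mut dec, d)).collect();
    assert_eq!(&decoded[..], &positions[..]);
}
```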
+#[must_use] +pub struct Lazy { + pub position: usize, + _marker: PhantomData +} + +impl Lazy { + pub fn with_position(position: usize) -> Lazy { + Lazy { + position: position, + _marker: PhantomData + } + } + + /// Returns the minimum encoded size of a value of type `T`. + // FIXME(eddyb) Give better estimates for certain types. + pub fn min_size() -> usize { + 1 + } +} + +impl Copy for Lazy {} +impl Clone for Lazy { + fn clone(&self) -> Self { *self } +} + +impl serialize::UseSpecializedEncodable for Lazy {} +impl serialize::UseSpecializedDecodable for Lazy {} + +/// A sequence of type T referred to by its absolute position +/// in the metadata and length, and which can be decoded lazily. +/// The sequence is a single node for the purposes of `Lazy`. +/// +/// Unlike `Lazy>`, the length is encoded next to the +/// position, not at the position, which means that the length +/// doesn't need to be known before encoding all the elements. +/// +/// If the length is 0, no position is encoded, but otherwise, +/// the encoding is that of `Lazy`, with the distinction that +/// the minimal distance the length of the sequence, i.e. +/// it's assumed there's no 0-byte element in the sequence. +#[must_use] +pub struct LazySeq { + pub len: usize, + pub position: usize, + _marker: PhantomData +} + +impl LazySeq { + pub fn empty() -> LazySeq { + LazySeq::with_position_and_length(0, 0) + } + + pub fn with_position_and_length(position: usize, len: usize) -> LazySeq { + LazySeq { + len: len, + position: position, + _marker: PhantomData + } + } + + /// Returns the minimum encoded size of `length` values of type `T`. + pub fn min_size(length: usize) -> usize { + length + } +} + +impl Copy for LazySeq {} +impl Clone for LazySeq { + fn clone(&self) -> Self { *self } +} + +impl serialize::UseSpecializedEncodable for LazySeq {} +impl serialize::UseSpecializedDecodable for LazySeq {} + +/// Encoding / decoding state for `Lazy` and `LazySeq`. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum LazyState { + /// Outside of a metadata node. + NoNode, + + /// Inside a metadata node, and before any `Lazy` or `LazySeq`. + /// The position is that of the node itself. + NodeStart(usize), + + /// Inside a metadata node, with a previous `Lazy` or `LazySeq`. + /// The position is a conservative estimate of where that + /// previous `Lazy` / `LazySeq` would end (see their comments). 
+ Previous(usize) +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct CrateRoot { + pub rustc_version: String, + pub name: String, + pub triple: String, + pub hash: hir::svh::Svh, + pub disambiguator: String, + pub panic_strategy: PanicStrategy, + pub plugin_registrar_fn: Option, + pub macro_derive_registrar: Option, + + pub crate_deps: LazySeq, + pub dylib_dependency_formats: LazySeq>, + pub lang_items: LazySeq<(DefIndex, usize)>, + pub lang_items_missing: LazySeq, + pub native_libraries: LazySeq<(NativeLibraryKind, String)>, + pub codemap: LazySeq, + pub macro_defs: LazySeq, + pub impls: LazySeq, + pub reachable_ids: LazySeq, + pub index: LazySeq, +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct CrateDep { + pub name: ast::Name, + pub hash: hir::svh::Svh, + pub explicitly_linked: bool +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct TraitImpls { + pub trait_id: (u32, DefIndex), + pub impls: LazySeq +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct MacroDef { + pub name: ast::Name, + pub attrs: Vec, + pub span: Span, + pub body: String +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct Entry<'tcx> { + pub kind: EntryKind<'tcx>, + pub visibility: ty::Visibility, + pub def_key: Lazy, + pub attributes: LazySeq, + pub children: LazySeq, + pub stability: Option>, + pub deprecation: Option>, + + pub ty: Option>>, + pub inherent_impls: LazySeq, + pub variances: LazySeq, + pub generics: Option>>, + pub predicates: Option>>, + + pub ast: Option>>, + pub mir: Option>> +} + +#[derive(Copy, Clone, RustcEncodable, RustcDecodable)] +pub enum EntryKind<'tcx> { + Const, + ImmStatic, + MutStatic, + ForeignImmStatic, + ForeignMutStatic, + ForeignMod, + Type, + Enum, + Field, + Variant(Lazy), + Struct(Lazy), + Union(Lazy), + Fn(Lazy), + ForeignFn(Lazy), + Mod(Lazy), + Closure(Lazy>), + Trait(Lazy>), + Impl(Lazy>), + DefaultImpl(Lazy>), + Method(Lazy>), + AssociatedType(AssociatedContainer), + AssociatedConst(AssociatedContainer) +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct ModData { + pub reexports: LazySeq +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct FnData { + pub constness: hir::Constness, + pub arg_names: LazySeq +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct VariantData { + pub kind: ty::VariantKind, + pub disr: u64, + + /// If this is a struct's only variant, this + /// is the index of the "struct ctor" item. + pub struct_ctor: Option +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct TraitData<'tcx> { + pub unsafety: hir::Unsafety, + pub paren_sugar: bool, + pub has_default_impl: bool, + pub trait_ref: Lazy>, + pub super_predicates: Lazy> +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct ImplData<'tcx> { + pub polarity: hir::ImplPolarity, + pub parent_impl: Option, + pub coerce_unsized_kind: Option, + pub trait_ref: Option>> +} + +/// Describes whether the container of an associated item +/// is a trait or an impl and whether, in a trait, it has +/// a default, or an in impl, whether it's marked "default". 
+#[derive(Copy, Clone, RustcEncodable, RustcDecodable)] +pub enum AssociatedContainer { + TraitRequired, + TraitWithDefault, + ImplDefault, + ImplFinal +} + +impl AssociatedContainer { + pub fn with_def_id(&self, def_id: DefId) -> ty::ImplOrTraitItemContainer { + match *self { + AssociatedContainer::TraitRequired | + AssociatedContainer::TraitWithDefault => { + ty::TraitContainer(def_id) + } + + AssociatedContainer::ImplDefault | + AssociatedContainer::ImplFinal => { + ty::ImplContainer(def_id) + } + } + } + + pub fn has_body(&self) -> bool { + match *self { + AssociatedContainer::TraitRequired => false, + + AssociatedContainer::TraitWithDefault | + AssociatedContainer::ImplDefault | + AssociatedContainer::ImplFinal => true + } + } + + pub fn defaultness(&self) -> hir::Defaultness { + match *self { + AssociatedContainer::TraitRequired | + AssociatedContainer::TraitWithDefault | + AssociatedContainer::ImplDefault => hir::Defaultness::Default, + + AssociatedContainer::ImplFinal => hir::Defaultness::Final + } + } +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct MethodData<'tcx> { + pub fn_data: FnData, + pub container: AssociatedContainer, + pub explicit_self: Lazy> +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct ClosureData<'tcx> { + pub kind: ty::ClosureKind, + pub ty: Lazy> +} diff --git a/src/librustc_metadata/tls_context.rs b/src/librustc_metadata/tls_context.rs deleted file mode 100644 index 23142ca80e..0000000000 --- a/src/librustc_metadata/tls_context.rs +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// This module provides implementations for the thread-local encoding and -// decoding context traits in rustc::middle::cstore::tls. 
- -use rbml::opaque::Encoder as OpaqueEncoder; -use rbml::opaque::Decoder as OpaqueDecoder; -use rustc::middle::cstore::tls; -use rustc::hir::def_id::DefId; -use rustc::ty::subst::Substs; -use rustc::ty::{self, TyCtxt}; - -use decoder::{self, Cmd}; -use encoder; -use tydecode::TyDecoder; -use tyencode; - -impl<'a, 'tcx: 'a> tls::EncodingContext<'tcx> for encoder::EncodeContext<'a, 'tcx> { - - fn tcx<'s>(&'s self) -> TyCtxt<'s, 'tcx, 'tcx> { - self.tcx - } - - fn encode_ty(&self, encoder: &mut OpaqueEncoder, t: ty::Ty<'tcx>) { - tyencode::enc_ty(encoder.cursor, &self.ty_str_ctxt(), t); - } - - fn encode_substs(&self, encoder: &mut OpaqueEncoder, substs: &Substs<'tcx>) { - tyencode::enc_substs(encoder.cursor, &self.ty_str_ctxt(), substs); - } -} - -pub struct DecodingContext<'a, 'tcx: 'a> { - pub crate_metadata: Cmd<'a>, - pub tcx: TyCtxt<'a, 'tcx, 'tcx>, -} - -impl<'a, 'tcx: 'a> tls::DecodingContext<'tcx> for DecodingContext<'a, 'tcx> { - - fn tcx<'s>(&'s self) -> TyCtxt<'s, 'tcx, 'tcx> { - self.tcx - } - - fn decode_ty(&self, decoder: &mut OpaqueDecoder) -> ty::Ty<'tcx> { - let def_id_convert = &mut |did| { - decoder::translate_def_id(self.crate_metadata, did) - }; - - let starting_position = decoder.position(); - - let mut ty_decoder = TyDecoder::new( - self.crate_metadata.data.as_slice(), - self.crate_metadata.cnum, - starting_position, - self.tcx, - def_id_convert); - - let ty = ty_decoder.parse_ty(); - - let end_position = ty_decoder.position(); - - // We can just reuse the tydecode implementation for parsing types, but - // we have to make sure to leave the rbml reader at the position just - // after the type. - decoder.advance(end_position - starting_position); - ty - } - - fn decode_substs(&self, decoder: &mut OpaqueDecoder) -> Substs<'tcx> { - let def_id_convert = &mut |did| { - decoder::translate_def_id(self.crate_metadata, did) - }; - - let starting_position = decoder.position(); - - let mut ty_decoder = TyDecoder::new( - self.crate_metadata.data.as_slice(), - self.crate_metadata.cnum, - starting_position, - self.tcx, - def_id_convert); - - let substs = ty_decoder.parse_substs(); - - let end_position = ty_decoder.position(); - - decoder.advance(end_position - starting_position); - substs - } - - fn translate_def_id(&self, def_id: DefId) -> DefId { - decoder::translate_def_id(self.crate_metadata, def_id) - } -} diff --git a/src/librustc_metadata/tydecode.rs b/src/librustc_metadata/tydecode.rs deleted file mode 100644 index 7b4919bb47..0000000000 --- a/src/librustc_metadata/tydecode.rs +++ /dev/null @@ -1,737 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - - -// Type decoding - -// tjc note: Would be great to have a `match check` macro equivalent -// for some of these - -#![allow(non_camel_case_types)] - -use rustc::hir; - -use rustc::hir::def_id::{DefId, DefIndex}; -use middle::region; -use rustc::ty::subst; -use rustc::ty::subst::VecPerParamSpace; -use rustc::ty::{self, ToPredicate, Ty, TyCtxt, TypeFoldable}; - -use rbml; -use rbml::leb128; -use std::str; -use syntax::abi; -use syntax::ast; -use syntax::parse::token; - -// Compact string representation for Ty values. API TyStr & -// parse_from_str. 
Extra parameters are for converting to/from def_ids in the -// data buffer. Whatever format you choose should not contain pipe characters. - -pub type DefIdConvert<'a> = &'a mut FnMut(DefId) -> DefId; - -pub struct TyDecoder<'a, 'tcx: 'a> { - data: &'a [u8], - krate: ast::CrateNum, - pos: usize, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - conv_def_id: DefIdConvert<'a>, -} - -impl<'a,'tcx> TyDecoder<'a,'tcx> { - pub fn with_doc(tcx: TyCtxt<'a, 'tcx, 'tcx>, - crate_num: ast::CrateNum, - doc: rbml::Doc<'a>, - conv: DefIdConvert<'a>) - -> TyDecoder<'a,'tcx> { - TyDecoder::new(doc.data, crate_num, doc.start, tcx, conv) - } - - pub fn new(data: &'a [u8], - crate_num: ast::CrateNum, - pos: usize, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - conv: DefIdConvert<'a>) - -> TyDecoder<'a, 'tcx> { - TyDecoder { - data: data, - krate: crate_num, - pos: pos, - tcx: tcx, - conv_def_id: conv, - } - } - - pub fn position(&self) -> usize { - self.pos - } - - fn peek(&self) -> char { - self.data[self.pos] as char - } - - fn next(&mut self) -> char { - let ch = self.data[self.pos] as char; - self.pos = self.pos + 1; - return ch; - } - - fn next_byte(&mut self) -> u8 { - let b = self.data[self.pos]; - self.pos = self.pos + 1; - return b; - } - - fn scan(&mut self, mut is_last: F) -> &'a [u8] - where F: FnMut(char) -> bool, - { - let start_pos = self.pos; - debug!("scan: '{}' (start)", self.data[self.pos] as char); - while !is_last(self.data[self.pos] as char) { - self.pos += 1; - debug!("scan: '{}'", self.data[self.pos] as char); - } - let end_pos = self.pos; - self.pos += 1; - return &self.data[start_pos..end_pos]; - } - - fn parse_vuint(&mut self) -> usize { - let (value, bytes_read) = leb128::read_unsigned_leb128(self.data, - self.pos); - self.pos += bytes_read; - value as usize - } - - fn parse_name(&mut self, last: char) -> ast::Name { - fn is_last(b: char, c: char) -> bool { return c == b; } - let bytes = self.scan(|a| is_last(last, a)); - token::intern(str::from_utf8(bytes).unwrap()) - } - - fn parse_size(&mut self) -> Option { - assert_eq!(self.next(), '/'); - - if self.peek() == '|' { - assert_eq!(self.next(), '|'); - None - } else { - let n = self.parse_uint(); - assert_eq!(self.next(), '|'); - Some(n) - } - } - - fn parse_vec_per_param_space(&mut self, mut f: F) -> VecPerParamSpace where - F: FnMut(&mut TyDecoder<'a, 'tcx>) -> T, - { - let mut r = VecPerParamSpace::empty(); - for &space in &subst::ParamSpace::all() { - assert_eq!(self.next(), '['); - while self.peek() != ']' { - r.push(space, f(self)); - } - assert_eq!(self.next(), ']'); - } - r - } - - pub fn parse_substs(&mut self) -> subst::Substs<'tcx> { - let regions = self.parse_vec_per_param_space(|this| this.parse_region()); - let types = self.parse_vec_per_param_space(|this| this.parse_ty()); - subst::Substs { types: types, regions: regions } - } - - fn parse_bound_region(&mut self) -> ty::BoundRegion { - match self.next() { - 'a' => { - let id = self.parse_u32(); - assert_eq!(self.next(), '|'); - ty::BrAnon(id) - } - '[' => { - let def = self.parse_def(); - let name = token::intern(&self.parse_str('|')); - let issue32330 = match self.next() { - 'n' => { - assert_eq!(self.next(), ']'); - ty::Issue32330::WontChange - } - 'y' => { - ty::Issue32330::WillChange { - fn_def_id: self.parse_def(), - region_name: token::intern(&self.parse_str(']')), - } - } - c => panic!("expected n or y not {}", c) - }; - ty::BrNamed(def, name, issue32330) - } - 'f' => { - let id = self.parse_u32(); - assert_eq!(self.next(), '|'); - ty::BrFresh(id) - } - 'e' => ty::BrEnv, - _ => 
bug!("parse_bound_region: bad input") - } - } - - pub fn parse_region(&mut self) -> ty::Region { - match self.next() { - 'b' => { - assert_eq!(self.next(), '['); - let id = ty::DebruijnIndex::new(self.parse_u32()); - assert_eq!(self.next(), '|'); - let br = self.parse_bound_region(); - assert_eq!(self.next(), ']'); - ty::ReLateBound(id, br) - } - 'B' => { - assert_eq!(self.next(), '['); - let space = self.parse_param_space(); - assert_eq!(self.next(), '|'); - let index = self.parse_u32(); - assert_eq!(self.next(), '|'); - let name = token::intern(&self.parse_str(']')); - ty::ReEarlyBound(ty::EarlyBoundRegion { - space: space, - index: index, - name: name - }) - } - 'f' => { - assert_eq!(self.next(), '['); - let scope = self.parse_scope(); - assert_eq!(self.next(), '|'); - let br = self.parse_bound_region(); - assert_eq!(self.next(), ']'); - ty::ReFree(ty::FreeRegion { scope: scope, - bound_region: br}) - } - 's' => { - let scope = self.parse_scope(); - assert_eq!(self.next(), '|'); - ty::ReScope(scope) - } - 't' => ty::ReStatic, - 'e' => ty::ReEmpty, - 'E' => ty::ReErased, - _ => bug!("parse_region: bad input") - } - } - - fn parse_scope(&mut self) -> region::CodeExtent { - self.tcx.region_maps.bogus_code_extent(match self.next() { - // This creates scopes with the wrong NodeId. This isn't - // actually a problem because scopes only exist *within* - // functions, and functions aren't loaded until trans which - // doesn't care about regions. - // - // May still be worth fixing though. - 'C' => { - assert_eq!(self.next(), '['); - let fn_id = self.parse_uint() as ast::NodeId; - assert_eq!(self.next(), '|'); - let body_id = self.parse_uint() as ast::NodeId; - assert_eq!(self.next(), ']'); - region::CodeExtentData::CallSiteScope { - fn_id: fn_id, body_id: body_id - } - } - // This creates scopes with the wrong NodeId. (See note above.) - 'P' => { - assert_eq!(self.next(), '['); - let fn_id = self.parse_uint() as ast::NodeId; - assert_eq!(self.next(), '|'); - let body_id = self.parse_uint() as ast::NodeId; - assert_eq!(self.next(), ']'); - region::CodeExtentData::ParameterScope { - fn_id: fn_id, body_id: body_id - } - } - 'M' => { - let node_id = self.parse_uint() as ast::NodeId; - region::CodeExtentData::Misc(node_id) - } - 'D' => { - let node_id = self.parse_uint() as ast::NodeId; - region::CodeExtentData::DestructionScope(node_id) - } - 'B' => { - assert_eq!(self.next(), '['); - let node_id = self.parse_uint() as ast::NodeId; - assert_eq!(self.next(), '|'); - let first_stmt_index = self.parse_u32(); - assert_eq!(self.next(), ']'); - let block_remainder = region::BlockRemainder { - block: node_id, first_statement_index: first_stmt_index, - }; - region::CodeExtentData::Remainder(block_remainder) - } - _ => bug!("parse_scope: bad input") - }) - } - - fn parse_opt(&mut self, f: F) -> Option - where F: FnOnce(&mut TyDecoder<'a, 'tcx>) -> T, - { - match self.next() { - 'n' => None, - 's' => Some(f(self)), - _ => bug!("parse_opt: bad input") - } - } - - fn parse_str(&mut self, term: char) -> String { - let mut result = String::new(); - while self.peek() != term { - unsafe { - result.as_mut_vec().extend_from_slice(&[self.next_byte()]) - } - } - self.next(); - result - } - - pub fn parse_trait_ref(&mut self) -> ty::TraitRef<'tcx> { - let def = self.parse_def(); - let substs = self.tcx.mk_substs(self.parse_substs()); - ty::TraitRef {def_id: def, substs: substs} - } - - pub fn parse_ty(&mut self) -> Ty<'tcx> { - let tcx = self.tcx; - match self.next() { - 'b' => return tcx.types.bool, - '!' 
=> return tcx.types.never, - 'i' => { /* eat the s of is */ self.next(); return tcx.types.isize }, - 'u' => { /* eat the s of us */ self.next(); return tcx.types.usize }, - 'M' => { - match self.next() { - 'b' => return tcx.types.u8, - 'w' => return tcx.types.u16, - 'l' => return tcx.types.u32, - 'd' => return tcx.types.u64, - 'B' => return tcx.types.i8, - 'W' => return tcx.types.i16, - 'L' => return tcx.types.i32, - 'D' => return tcx.types.i64, - 'f' => return tcx.types.f32, - 'F' => return tcx.types.f64, - _ => bug!("parse_ty: bad numeric type") - } - } - 'c' => return tcx.types.char, - 't' => { - assert_eq!(self.next(), '['); - let did = self.parse_def(); - let substs = self.parse_substs(); - assert_eq!(self.next(), ']'); - let def = self.tcx.lookup_adt_def(did); - return tcx.mk_enum(def, self.tcx.mk_substs(substs)); - } - 'x' => { - assert_eq!(self.next(), '['); - let trait_ref = ty::Binder(self.parse_trait_ref()); - let bounds = self.parse_existential_bounds(); - assert_eq!(self.next(), ']'); - return tcx.mk_trait(trait_ref, bounds); - } - 'p' => { - assert_eq!(self.next(), '['); - let index = self.parse_u32(); - assert_eq!(self.next(), '|'); - let space = self.parse_param_space(); - assert_eq!(self.next(), '|'); - let name = token::intern(&self.parse_str(']')); - return tcx.mk_param(space, index, name); - } - '~' => return tcx.mk_box(self.parse_ty()), - '*' => return tcx.mk_ptr(self.parse_mt()), - '&' => { - let r = self.parse_region(); - let mt = self.parse_mt(); - return tcx.mk_ref(tcx.mk_region(r), mt); - } - 'V' => { - let t = self.parse_ty(); - return match self.parse_size() { - Some(n) => tcx.mk_array(t, n), - None => tcx.mk_slice(t) - }; - } - 'v' => { - return tcx.mk_str(); - } - 'T' => { - assert_eq!(self.next(), '['); - let mut params = Vec::new(); - while self.peek() != ']' { params.push(self.parse_ty()); } - self.pos = self.pos + 1; - return tcx.mk_tup(params); - } - 'F' => { - let def_id = self.parse_def(); - let substs = self.tcx.mk_substs(self.parse_substs()); - return tcx.mk_fn_def(def_id, substs, self.parse_bare_fn_ty()); - } - 'G' => { - return tcx.mk_fn_ptr(self.parse_bare_fn_ty()); - } - '#' => { - // This is a hacky little caching scheme. The idea is that if we encode - // the same type twice, the second (and third, and fourth...) time we will - // just write `#123`, where `123` is the offset in the metadata of the - // first appearance. Now when we are *decoding*, if we see a `#123`, we - // can first check a cache (`tcx.rcache`) for that offset. If we find something, - // we return it (modulo closure types, see below). But if not, then we - // jump to offset 123 and read the type from there. - - let pos = self.parse_vuint(); - let key = ty::CReaderCacheKey { cnum: self.krate, pos: pos }; - if let Some(tt) = tcx.rcache.borrow().get(&key).cloned() { - // If there is a closure buried in the type some where, then we - // need to re-convert any def ids (see case 'k', below). That means - // we can't reuse the cached version. 
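Editor's note: the deleted tydecode.rs comment just above describes the `#`-offset abbreviation scheme, where a repeated type is written as a back-reference to the byte offset of its first occurrence and the decoder keeps an offset-keyed cache so it only re-parses on a miss (the handling of the cached case continues directly below). As a rough illustration only, here is a self-contained sketch of that idea over plain `|`-terminated strings; the syntax, names, and cache shape are assumptions invented for the example, not the rustc metadata format, which stores LEB128 offsets and caches results in `tcx.rcache`.

```rust
use std::collections::HashMap;

/// Minimal decoder: a value is either a literal word terminated by '|',
/// or a back-reference `#<offset>|` pointing at an earlier occurrence.
struct Decoder<'a> {
    data: &'a [u8],
    pos: usize,
    cache: HashMap<usize, String>, // offset -> already-decoded value
}

impl<'a> Decoder<'a> {
    fn new(data: &'a [u8]) -> Self {
        Decoder { data, pos: 0, cache: HashMap::new() }
    }

    /// Read bytes up to the terminator '|', consuming it.
    fn scan(&mut self) -> String {
        let start = self.pos;
        while self.data[self.pos] != b'|' {
            self.pos += 1;
        }
        let s = String::from_utf8(self.data[start..self.pos].to_vec()).unwrap();
        self.pos += 1; // eat '|'
        s
    }

    fn parse(&mut self) -> String {
        if self.data[self.pos] == b'#' {
            self.pos += 1;
            let offset: usize = self.scan().parse().unwrap();
            if let Some(hit) = self.cache.get(&offset) {
                return hit.clone(); // cache hit: no re-decoding needed
            }
            // Cache miss: jump back, decode the first occurrence, remember it.
            let saved = self.pos;
            self.pos = offset;
            let value = self.parse();
            self.cache.insert(offset, value.clone());
            self.pos = saved;
            value
        } else {
            self.scan()
        }
    }
}

fn main() {
    // "int" is spelled out once (at offset 0); later uses refer back with `#0|`.
    let mut d = Decoder::new(b"int|#0|#0|");
    assert_eq!(d.parse(), "int");
    assert_eq!(d.parse(), "int");
    assert_eq!(d.parse(), "int");
}
```

The design point mirrored here is that a back-reference is only worth emitting when it is shorter than the spelled-out encoding, which is exactly the length check the encoder performs before storing an abbreviation (see `enc_ty` in the deleted tyencode.rs further down).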
- if !tt.has_closure_types() { - return tt; - } - } - - let mut substate = TyDecoder::new(self.data, - self.krate, - pos, - self.tcx, - self.conv_def_id); - let tt = substate.parse_ty(); - tcx.rcache.borrow_mut().insert(key, tt); - return tt; - } - '\"' => { - let _ = self.parse_def(); - let inner = self.parse_ty(); - inner - } - 'a' => { - assert_eq!(self.next(), '['); - let did = self.parse_def(); - let substs = self.parse_substs(); - assert_eq!(self.next(), ']'); - let def = self.tcx.lookup_adt_def(did); - return self.tcx.mk_struct(def, self.tcx.mk_substs(substs)); - } - 'k' => { - assert_eq!(self.next(), '['); - let did = self.parse_def(); - let substs = self.parse_substs(); - let mut tys = vec![]; - while self.peek() != '.' { - tys.push(self.parse_ty()); - } - assert_eq!(self.next(), '.'); - assert_eq!(self.next(), ']'); - return self.tcx.mk_closure(did, self.tcx.mk_substs(substs), tys); - } - 'P' => { - assert_eq!(self.next(), '['); - let trait_ref = self.parse_trait_ref(); - let name = token::intern(&self.parse_str(']')); - return tcx.mk_projection(trait_ref, name); - } - 'A' => { - assert_eq!(self.next(), '['); - let def_id = self.parse_def(); - let substs = self.parse_substs(); - assert_eq!(self.next(), ']'); - return self.tcx.mk_anon(def_id, self.tcx.mk_substs(substs)); - } - 'e' => { - return tcx.types.err; - } - c => { bug!("unexpected char in type string: {}", c);} - } - } - - fn parse_mutability(&mut self) -> hir::Mutability { - match self.peek() { - 'm' => { self.next(); hir::MutMutable } - _ => { hir::MutImmutable } - } - } - - fn parse_mt(&mut self) -> ty::TypeAndMut<'tcx> { - let m = self.parse_mutability(); - ty::TypeAndMut { ty: self.parse_ty(), mutbl: m } - } - - fn parse_def(&mut self) -> DefId { - let def_id = parse_defid(self.scan(|c| c == '|')); - return (self.conv_def_id)(def_id); - } - - fn parse_uint(&mut self) -> usize { - let mut n = 0; - loop { - let cur = self.peek(); - if cur < '0' || cur > '9' { return n; } - self.pos = self.pos + 1; - n *= 10; - n += (cur as usize) - ('0' as usize); - }; - } - - fn parse_u32(&mut self) -> u32 { - let n = self.parse_uint(); - let m = n as u32; - assert_eq!(m as usize, n); - m - } - - fn parse_param_space(&mut self) -> subst::ParamSpace { - subst::ParamSpace::from_uint(self.parse_uint()) - } - - fn parse_abi_set(&mut self) -> abi::Abi { - assert_eq!(self.next(), '['); - let bytes = self.scan(|c| c == ']'); - let abi_str = str::from_utf8(bytes).unwrap(); - abi::lookup(&abi_str[..]).expect(abi_str) - } - - pub fn parse_closure_ty(&mut self) -> ty::ClosureTy<'tcx> { - let unsafety = parse_unsafety(self.next()); - let sig = self.parse_sig(); - let abi = self.parse_abi_set(); - ty::ClosureTy { - unsafety: unsafety, - sig: sig, - abi: abi, - } - } - - pub fn parse_bare_fn_ty(&mut self) -> &'tcx ty::BareFnTy<'tcx> { - let unsafety = parse_unsafety(self.next()); - let abi = self.parse_abi_set(); - let sig = self.parse_sig(); - self.tcx.mk_bare_fn(ty::BareFnTy { - unsafety: unsafety, - abi: abi, - sig: sig - }) - } - - fn parse_sig(&mut self) -> ty::PolyFnSig<'tcx> { - assert_eq!(self.next(), '['); - let mut inputs = Vec::new(); - while self.peek() != ']' { - inputs.push(self.parse_ty()); - } - self.pos += 1; // eat the ']' - let variadic = match self.next() { - 'V' => true, - 'N' => false, - r => bug!("bad variadic: {}", r), - }; - let output = self.parse_ty(); - ty::Binder(ty::FnSig {inputs: inputs, - output: output, - variadic: variadic}) - } - - pub fn parse_predicate(&mut self) -> ty::Predicate<'tcx> { - match self.next() { - 
't' => ty::Binder(self.parse_trait_ref()).to_predicate(), - 'e' => ty::Binder(ty::EquatePredicate(self.parse_ty(), - self.parse_ty())).to_predicate(), - 'r' => ty::Binder(ty::OutlivesPredicate(self.parse_region(), - self.parse_region())).to_predicate(), - 'o' => ty::Binder(ty::OutlivesPredicate(self.parse_ty(), - self.parse_region())).to_predicate(), - 'p' => ty::Binder(self.parse_projection_predicate()).to_predicate(), - 'w' => ty::Predicate::WellFormed(self.parse_ty()), - 'O' => { - let def_id = self.parse_def(); - assert_eq!(self.next(), '|'); - ty::Predicate::ObjectSafe(def_id) - } - 'c' => { - let def_id = self.parse_def(); - assert_eq!(self.next(), '|'); - let kind = match self.next() { - 'f' => ty::ClosureKind::Fn, - 'm' => ty::ClosureKind::FnMut, - 'o' => ty::ClosureKind::FnOnce, - c => bug!("Encountered invalid character in metadata: {}", c) - }; - assert_eq!(self.next(), '|'); - ty::Predicate::ClosureKind(def_id, kind) - } - c => bug!("Encountered invalid character in metadata: {}", c) - } - } - - fn parse_projection_predicate(&mut self) -> ty::ProjectionPredicate<'tcx> { - ty::ProjectionPredicate { - projection_ty: ty::ProjectionTy { - trait_ref: self.parse_trait_ref(), - item_name: token::intern(&self.parse_str('|')), - }, - ty: self.parse_ty(), - } - } - - pub fn parse_type_param_def(&mut self) -> ty::TypeParameterDef<'tcx> { - let name = self.parse_name(':'); - let def_id = self.parse_def(); - let space = self.parse_param_space(); - assert_eq!(self.next(), '|'); - let index = self.parse_u32(); - assert_eq!(self.next(), '|'); - let default_def_id = self.parse_def(); - let default = self.parse_opt(|this| this.parse_ty()); - let object_lifetime_default = self.parse_object_lifetime_default(); - - ty::TypeParameterDef { - name: name, - def_id: def_id, - space: space, - index: index, - default_def_id: default_def_id, - default: default, - object_lifetime_default: object_lifetime_default, - } - } - - pub fn parse_region_param_def(&mut self) -> ty::RegionParameterDef { - let name = self.parse_name(':'); - let def_id = self.parse_def(); - let space = self.parse_param_space(); - assert_eq!(self.next(), '|'); - let index = self.parse_u32(); - assert_eq!(self.next(), '|'); - let mut bounds = vec![]; - loop { - match self.next() { - 'R' => bounds.push(self.parse_region()), - '.' => { break; } - c => { - bug!("parse_region_param_def: bad bounds ('{}')", c) - } - } - } - ty::RegionParameterDef { - name: name, - def_id: def_id, - space: space, - index: index, - bounds: bounds, - } - } - - - fn parse_object_lifetime_default(&mut self) -> ty::ObjectLifetimeDefault { - match self.next() { - 'a' => ty::ObjectLifetimeDefault::Ambiguous, - 'b' => ty::ObjectLifetimeDefault::BaseDefault, - 's' => { - let region = self.parse_region(); - ty::ObjectLifetimeDefault::Specific(region) - } - _ => bug!("parse_object_lifetime_default: bad input") - } - } - - pub fn parse_existential_bounds(&mut self) -> ty::ExistentialBounds<'tcx> { - let builtin_bounds = self.parse_builtin_bounds(); - let region_bound = self.parse_region(); - let mut projection_bounds = Vec::new(); - - loop { - match self.next() { - 'P' => { - projection_bounds.push(ty::Binder(self.parse_projection_predicate())); - } - '.' 
=> { break; } - c => { - bug!("parse_bounds: bad bounds ('{}')", c) - } - } - } - - ty::ExistentialBounds::new( - region_bound, builtin_bounds, projection_bounds) - } - - fn parse_builtin_bounds(&mut self) -> ty::BuiltinBounds { - let mut builtin_bounds = ty::BuiltinBounds::empty(); - loop { - match self.next() { - 'S' => { - builtin_bounds.insert(ty::BoundSend); - } - 'Z' => { - builtin_bounds.insert(ty::BoundSized); - } - 'P' => { - builtin_bounds.insert(ty::BoundCopy); - } - 'T' => { - builtin_bounds.insert(ty::BoundSync); - } - '.' => { - return builtin_bounds; - } - c => { - bug!("parse_bounds: bad builtin bounds ('{}')", c) - } - } - } - } -} - -// Rust metadata parsing -fn parse_defid(buf: &[u8]) -> DefId { - let mut colon_idx = 0; - let len = buf.len(); - while colon_idx < len && buf[colon_idx] != ':' as u8 { colon_idx += 1; } - if colon_idx == len { - error!("didn't find ':' when parsing def id"); - bug!(); - } - - let crate_part = &buf[0..colon_idx]; - let def_part = &buf[colon_idx + 1..len]; - - let crate_num = match str::from_utf8(crate_part).ok().and_then(|s| { - s.parse::().ok() - }) { - Some(cn) => cn as ast::CrateNum, - None => bug!("internal error: parse_defid: crate number expected, found {:?}", - crate_part) - }; - let def_num = match str::from_utf8(def_part).ok().and_then(|s| { - s.parse::().ok() - }) { - Some(dn) => dn, - None => bug!("internal error: parse_defid: id expected, found {:?}", - def_part) - }; - let index = DefIndex::new(def_num); - DefId { krate: crate_num, index: index } -} - -fn parse_unsafety(c: char) -> hir::Unsafety { - match c { - 'u' => hir::Unsafety::Unsafe, - 'n' => hir::Unsafety::Normal, - _ => bug!("parse_unsafety: bad unsafety {}", c) - } -} diff --git a/src/librustc_metadata/tyencode.rs b/src/librustc_metadata/tyencode.rs deleted file mode 100644 index 15bafcdd3c..0000000000 --- a/src/librustc_metadata/tyencode.rs +++ /dev/null @@ -1,518 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// Type encoding - -#![allow(unused_must_use)] // as with encoding, everything is a no-fail MemWriter -#![allow(non_camel_case_types)] - -use std::cell::RefCell; -use std::io::Cursor; -use std::io::prelude::*; - -use rustc::hir::def_id::DefId; -use middle::region; -use rustc::ty::subst; -use rustc::ty::subst::VecPerParamSpace; -use rustc::ty::ParamTy; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::util::nodemap::FnvHashMap; - -use rustc::hir; - -use syntax::abi::Abi; -use syntax::ast; -use errors::Handler; - -use rbml::leb128; -use encoder; - -pub struct ctxt<'a, 'tcx: 'a> { - pub diag: &'a Handler, - // Def -> str Callback: - pub ds: for<'b> fn(TyCtxt<'b, 'tcx, 'tcx>, DefId) -> String, - // The type context. - pub tcx: TyCtxt<'a, 'tcx, 'tcx>, - pub abbrevs: &'a abbrev_map<'tcx> -} - -impl<'a, 'tcx> encoder::EncodeContext<'a, 'tcx> { - pub fn ty_str_ctxt<'b>(&'b self) -> ctxt<'b, 'tcx> { - ctxt { - diag: self.tcx.sess.diagnostic(), - ds: encoder::def_to_string, - tcx: self.tcx, - abbrevs: &self.type_abbrevs - } - } -} - -// Compact string representation for Ty values. API TyStr & parse_from_str. -// Extra parameters are for converting to/from def_ids in the string rep. -// Whatever format you choose should not contain pipe characters. 
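Editor's note: both sides of this compact format store their back-reference offsets with `rbml::leb128` (`read_unsigned_leb128` in the decoder above, `write_unsigned_leb128` in `enc_ty` below). The sketch that follows implements plain unsigned LEB128 from the general format description, seven payload bits per byte plus a continuation bit; the function signatures are assumptions for the example and are not copied from the rbml API.

```rust
/// Encode an unsigned integer as LEB128: low 7 bits per byte,
/// high bit set on every byte except the last.
fn write_unsigned_leb128(out: &mut Vec<u8>, mut value: u64) {
    loop {
        let mut byte = (value & 0x7f) as u8;
        value >>= 7;
        if value != 0 {
            byte |= 0x80; // more bytes follow
        }
        out.push(byte);
        if value == 0 {
            break;
        }
    }
}

/// Decode an unsigned LEB128 value, returning (value, bytes_read).
fn read_unsigned_leb128(data: &[u8], start: usize) -> (u64, usize) {
    let mut result = 0u64;
    let mut shift = 0;
    let mut pos = start;
    loop {
        let byte = data[pos];
        pos += 1;
        result |= ((byte & 0x7f) as u64) << shift;
        if byte & 0x80 == 0 {
            break;
        }
        shift += 7;
    }
    (result, pos - start)
}

fn main() {
    let mut buf = Vec::new();
    write_unsigned_leb128(&mut buf, 624485);
    assert_eq!(buf, [0xe5, 0x8e, 0x26]); // classic three-byte LEB128 example
    assert_eq!(read_unsigned_leb128(&buf, 0), (624485, 3));
}
```

Variable-length offsets keep the common case, small positions within the metadata blob, down to one or two bytes.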
-pub struct ty_abbrev { - s: Vec -} - -pub type abbrev_map<'tcx> = RefCell, ty_abbrev>>; - -pub fn enc_ty<'a, 'tcx>(w: &mut Cursor>, cx: &ctxt<'a, 'tcx>, t: Ty<'tcx>) { - if let Some(a) = cx.abbrevs.borrow_mut().get(&t) { - w.write_all(&a.s); - return; - } - - let pos = w.position(); - - match t.sty { - ty::TyBool => { write!(w, "b"); } - ty::TyChar => { write!(w, "c"); } - ty::TyNever => { write!(w, "!"); } - ty::TyInt(t) => { - match t { - ast::IntTy::Is => write!(w, "is"), - ast::IntTy::I8 => write!(w, "MB"), - ast::IntTy::I16 => write!(w, "MW"), - ast::IntTy::I32 => write!(w, "ML"), - ast::IntTy::I64 => write!(w, "MD") - }; - } - ty::TyUint(t) => { - match t { - ast::UintTy::Us => write!(w, "us"), - ast::UintTy::U8 => write!(w, "Mb"), - ast::UintTy::U16 => write!(w, "Mw"), - ast::UintTy::U32 => write!(w, "Ml"), - ast::UintTy::U64 => write!(w, "Md") - }; - } - ty::TyFloat(t) => { - match t { - ast::FloatTy::F32 => write!(w, "Mf"), - ast::FloatTy::F64 => write!(w, "MF"), - }; - } - ty::TyEnum(def, substs) => { - write!(w, "t[{}|", (cx.ds)(cx.tcx, def.did)); - enc_substs(w, cx, substs); - write!(w, "]"); - } - ty::TyTrait(box ty::TraitTy { ref principal, - ref bounds }) => { - write!(w, "x["); - enc_trait_ref(w, cx, principal.0); - enc_existential_bounds(w, cx, bounds); - write!(w, "]"); - } - ty::TyTuple(ts) => { - write!(w, "T["); - for t in ts { enc_ty(w, cx, *t); } - write!(w, "]"); - } - ty::TyBox(typ) => { write!(w, "~"); enc_ty(w, cx, typ); } - ty::TyRawPtr(mt) => { write!(w, "*"); enc_mt(w, cx, mt); } - ty::TyRef(r, mt) => { - write!(w, "&"); - enc_region(w, cx, *r); - enc_mt(w, cx, mt); - } - ty::TyArray(t, sz) => { - write!(w, "V"); - enc_ty(w, cx, t); - write!(w, "/{}|", sz); - } - ty::TySlice(t) => { - write!(w, "V"); - enc_ty(w, cx, t); - write!(w, "/|"); - } - ty::TyStr => { - write!(w, "v"); - } - ty::TyFnDef(def_id, substs, f) => { - write!(w, "F"); - write!(w, "{}|", (cx.ds)(cx.tcx, def_id)); - enc_substs(w, cx, substs); - enc_bare_fn_ty(w, cx, f); - } - ty::TyFnPtr(f) => { - write!(w, "G"); - enc_bare_fn_ty(w, cx, f); - } - ty::TyInfer(_) => { - bug!("cannot encode inference variable types"); - } - ty::TyParam(ParamTy {space, idx, name}) => { - write!(w, "p[{}|{}|{}]", idx, space.to_uint(), name); - } - ty::TyStruct(def, substs) => { - write!(w, "a[{}|", (cx.ds)(cx.tcx, def.did)); - enc_substs(w, cx, substs); - write!(w, "]"); - } - ty::TyClosure(def, substs) => { - write!(w, "k[{}|", (cx.ds)(cx.tcx, def)); - enc_substs(w, cx, substs.func_substs); - for ty in substs.upvar_tys { - enc_ty(w, cx, ty); - } - write!(w, "."); - write!(w, "]"); - } - ty::TyProjection(ref data) => { - write!(w, "P["); - enc_trait_ref(w, cx, data.trait_ref); - write!(w, "{}]", data.item_name); - } - ty::TyAnon(def_id, substs) => { - write!(w, "A[{}|", (cx.ds)(cx.tcx, def_id)); - enc_substs(w, cx, substs); - write!(w, "]"); - } - ty::TyError => { - write!(w, "e"); - } - } - - let end = w.position(); - let len = end - pos; - - let mut abbrev = Cursor::new(Vec::with_capacity(16)); - abbrev.write_all(b"#"); - { - let start_position = abbrev.position() as usize; - let bytes_written = leb128::write_unsigned_leb128(abbrev.get_mut(), - start_position, - pos); - abbrev.set_position((start_position + bytes_written) as u64); - } - - cx.abbrevs.borrow_mut().insert(t, ty_abbrev { - s: if abbrev.position() < len { - abbrev.get_ref()[..abbrev.position() as usize].to_owned() - } else { - // if the abbreviation is longer than the real type, - // don't use #-notation. 
However, insert it here so - // other won't have to `mark_stable_position` - w.get_ref()[pos as usize .. end as usize].to_owned() - } - }); -} - -fn enc_mutability(w: &mut Cursor>, mt: hir::Mutability) { - match mt { - hir::MutImmutable => (), - hir::MutMutable => { - write!(w, "m"); - } - }; -} - -fn enc_mt<'a, 'tcx>(w: &mut Cursor>, cx: &ctxt<'a, 'tcx>, - mt: ty::TypeAndMut<'tcx>) { - enc_mutability(w, mt.mutbl); - enc_ty(w, cx, mt.ty); -} - -fn enc_opt(w: &mut Cursor>, t: Option, enc_f: F) where - F: FnOnce(&mut Cursor>, T), -{ - match t { - None => { - write!(w, "n"); - } - Some(v) => { - write!(w, "s"); - enc_f(w, v); - } - } -} - -fn enc_vec_per_param_space<'a, 'tcx, T, F>(w: &mut Cursor>, - cx: &ctxt<'a, 'tcx>, - v: &VecPerParamSpace, - mut op: F) where - F: FnMut(&mut Cursor>, &ctxt<'a, 'tcx>, &T), -{ - for &space in &subst::ParamSpace::all() { - write!(w, "["); - for t in v.get_slice(space) { - op(w, cx, t); - } - write!(w, "]"); - } -} - -pub fn enc_substs<'a, 'tcx>(w: &mut Cursor>, cx: &ctxt<'a, 'tcx>, - substs: &subst::Substs<'tcx>) { - enc_vec_per_param_space(w, cx, &substs.regions, - |w, cx, &r| enc_region(w, cx, r)); - enc_vec_per_param_space(w, cx, &substs.types, - |w, cx, &ty| enc_ty(w, cx, ty)); -} - -pub fn enc_region(w: &mut Cursor>, cx: &ctxt, r: ty::Region) { - match r { - ty::ReLateBound(id, br) => { - write!(w, "b[{}|", id.depth); - enc_bound_region(w, cx, br); - write!(w, "]"); - } - ty::ReEarlyBound(ref data) => { - write!(w, "B[{}|{}|{}]", - data.space.to_uint(), - data.index, - data.name); - } - ty::ReFree(ref fr) => { - write!(w, "f["); - enc_scope(w, cx, fr.scope); - write!(w, "|"); - enc_bound_region(w, cx, fr.bound_region); - write!(w, "]"); - } - ty::ReScope(scope) => { - write!(w, "s"); - enc_scope(w, cx, scope); - write!(w, "|"); - } - ty::ReStatic => { - write!(w, "t"); - } - ty::ReEmpty => { - write!(w, "e"); - } - ty::ReErased => { - write!(w, "E"); - } - ty::ReVar(_) | ty::ReSkolemized(..) 
=> { - // these should not crop up after typeck - bug!("cannot encode region variables"); - } - } -} - -fn enc_scope(w: &mut Cursor>, cx: &ctxt, scope: region::CodeExtent) { - match cx.tcx.region_maps.code_extent_data(scope) { - region::CodeExtentData::CallSiteScope { - fn_id, body_id } => write!(w, "C[{}|{}]", fn_id, body_id), - region::CodeExtentData::ParameterScope { - fn_id, body_id } => write!(w, "P[{}|{}]", fn_id, body_id), - region::CodeExtentData::Misc(node_id) => write!(w, "M{}", node_id), - region::CodeExtentData::Remainder(region::BlockRemainder { - block: b, first_statement_index: i }) => write!(w, "B[{}|{}]", b, i), - region::CodeExtentData::DestructionScope(node_id) => write!(w, "D{}", node_id), - }; -} - -fn enc_bound_region(w: &mut Cursor>, cx: &ctxt, br: ty::BoundRegion) { - match br { - ty::BrAnon(idx) => { - write!(w, "a{}|", idx); - } - ty::BrNamed(d, name, issue32330) => { - write!(w, "[{}|{}|", - (cx.ds)(cx.tcx, d), - name); - - match issue32330 { - ty::Issue32330::WontChange => - write!(w, "n]"), - ty::Issue32330::WillChange { fn_def_id, region_name } => - write!(w, "y{}|{}]", (cx.ds)(cx.tcx, fn_def_id), region_name), - }; - } - ty::BrFresh(id) => { - write!(w, "f{}|", id); - } - ty::BrEnv => { - write!(w, "e|"); - } - } -} - -pub fn enc_trait_ref<'a, 'tcx>(w: &mut Cursor>, cx: &ctxt<'a, 'tcx>, - s: ty::TraitRef<'tcx>) { - write!(w, "{}|", (cx.ds)(cx.tcx, s.def_id)); - enc_substs(w, cx, s.substs); -} - -fn enc_unsafety(w: &mut Cursor>, p: hir::Unsafety) { - match p { - hir::Unsafety::Normal => write!(w, "n"), - hir::Unsafety::Unsafe => write!(w, "u"), - }; -} - -fn enc_abi(w: &mut Cursor>, abi: Abi) { - write!(w, "["); - write!(w, "{}", abi.name()); - write!(w, "]"); -} - -pub fn enc_bare_fn_ty<'a, 'tcx>(w: &mut Cursor>, cx: &ctxt<'a, 'tcx>, - ft: &ty::BareFnTy<'tcx>) { - enc_unsafety(w, ft.unsafety); - enc_abi(w, ft.abi); - enc_fn_sig(w, cx, &ft.sig); -} - -pub fn enc_closure_ty<'a, 'tcx>(w: &mut Cursor>, cx: &ctxt<'a, 'tcx>, - ft: &ty::ClosureTy<'tcx>) { - enc_unsafety(w, ft.unsafety); - enc_fn_sig(w, cx, &ft.sig); - enc_abi(w, ft.abi); -} - -fn enc_fn_sig<'a, 'tcx>(w: &mut Cursor>, cx: &ctxt<'a, 'tcx>, - fsig: &ty::PolyFnSig<'tcx>) { - write!(w, "["); - for ty in &fsig.0.inputs { - enc_ty(w, cx, *ty); - } - write!(w, "]"); - if fsig.0.variadic { - write!(w, "V"); - } else { - write!(w, "N"); - } - enc_ty(w, cx, fsig.0.output); -} - -pub fn enc_builtin_bounds(w: &mut Cursor>, _cx: &ctxt, bs: &ty::BuiltinBounds) { - for bound in bs { - match bound { - ty::BoundSend => write!(w, "S"), - ty::BoundSized => write!(w, "Z"), - ty::BoundCopy => write!(w, "P"), - ty::BoundSync => write!(w, "T"), - }; - } - - write!(w, "."); -} - -pub fn enc_existential_bounds<'a,'tcx>(w: &mut Cursor>, - cx: &ctxt<'a,'tcx>, - bs: &ty::ExistentialBounds<'tcx>) { - enc_builtin_bounds(w, cx, &bs.builtin_bounds); - - enc_region(w, cx, bs.region_bound); - - // Encode projection_bounds in a stable order - let mut projection_bounds: Vec<_> = bs.projection_bounds - .iter() - .map(|b| (b.item_name().as_str(), b)) - .collect(); - projection_bounds.sort_by_key(|&(ref name, _)| name.clone()); - - for tp in projection_bounds.iter().map(|&(_, tp)| tp) { - write!(w, "P"); - enc_projection_predicate(w, cx, &tp.0); - } - - write!(w, "."); -} - -pub fn enc_type_param_def<'a, 'tcx>(w: &mut Cursor>, cx: &ctxt<'a, 'tcx>, - v: &ty::TypeParameterDef<'tcx>) { - write!(w, "{}:{}|{}|{}|{}|", - v.name, (cx.ds)(cx.tcx, v.def_id), - v.space.to_uint(), v.index, (cx.ds)(cx.tcx, v.default_def_id)); - enc_opt(w, v.default, 
|w, t| enc_ty(w, cx, t)); - enc_object_lifetime_default(w, cx, v.object_lifetime_default); -} - -pub fn enc_region_param_def(w: &mut Cursor>, cx: &ctxt, - v: &ty::RegionParameterDef) { - write!(w, "{}:{}|{}|{}|", - v.name, (cx.ds)(cx.tcx, v.def_id), - v.space.to_uint(), v.index); - for &r in &v.bounds { - write!(w, "R"); - enc_region(w, cx, r); - } - write!(w, "."); -} - -fn enc_object_lifetime_default<'a, 'tcx>(w: &mut Cursor>, - cx: &ctxt<'a, 'tcx>, - default: ty::ObjectLifetimeDefault) -{ - match default { - ty::ObjectLifetimeDefault::Ambiguous => { - write!(w, "a"); - } - ty::ObjectLifetimeDefault::BaseDefault => { - write!(w, "b"); - } - ty::ObjectLifetimeDefault::Specific(r) => { - write!(w, "s"); - enc_region(w, cx, r); - } - } -} - -pub fn enc_predicate<'a, 'tcx>(w: &mut Cursor>, - cx: &ctxt<'a, 'tcx>, - p: &ty::Predicate<'tcx>) -{ - match *p { - ty::Predicate::Rfc1592(..) => { - bug!("RFC1592 predicate in metadata `{:?}`", p); - } - ty::Predicate::Trait(ref trait_ref) => { - write!(w, "t"); - enc_trait_ref(w, cx, trait_ref.0.trait_ref); - } - ty::Predicate::Equate(ty::Binder(ty::EquatePredicate(a, b))) => { - write!(w, "e"); - enc_ty(w, cx, a); - enc_ty(w, cx, b); - } - ty::Predicate::RegionOutlives(ty::Binder(ty::OutlivesPredicate(a, b))) => { - write!(w, "r"); - enc_region(w, cx, a); - enc_region(w, cx, b); - } - ty::Predicate::TypeOutlives(ty::Binder(ty::OutlivesPredicate(a, b))) => { - write!(w, "o"); - enc_ty(w, cx, a); - enc_region(w, cx, b); - } - ty::Predicate::Projection(ty::Binder(ref data)) => { - write!(w, "p"); - enc_projection_predicate(w, cx, data); - } - ty::Predicate::WellFormed(data) => { - write!(w, "w"); - enc_ty(w, cx, data); - } - ty::Predicate::ObjectSafe(trait_def_id) => { - write!(w, "O{}|", (cx.ds)(cx.tcx, trait_def_id)); - } - ty::Predicate::ClosureKind(closure_def_id, kind) => { - let kind_char = match kind { - ty::ClosureKind::Fn => 'f', - ty::ClosureKind::FnMut => 'm', - ty::ClosureKind::FnOnce => 'o', - }; - write!(w, "c{}|{}|", (cx.ds)(cx.tcx, closure_def_id), kind_char); - } - } -} - -fn enc_projection_predicate<'a, 'tcx>(w: &mut Cursor>, - cx: &ctxt<'a, 'tcx>, - data: &ty::ProjectionPredicate<'tcx>) { - enc_trait_ref(w, cx, data.projection_ty.trait_ref); - write!(w, "{}|", data.projection_ty.item_name); - enc_ty(w, cx, data.ty); -} diff --git a/src/librustc_mir/build/cfg.rs b/src/librustc_mir/build/cfg.rs index 83f8c3b42c..026a79b32b 100644 --- a/src/librustc_mir/build/cfg.rs +++ b/src/librustc_mir/build/cfg.rs @@ -13,7 +13,7 @@ //! Routines for manipulating the control-flow graph. 
-use build::{CFG, Location}; +use build::CFG; use rustc::mir::repr::*; impl<'tcx> CFG<'tcx> { diff --git a/src/librustc_mir/build/expr/as_rvalue.rs b/src/librustc_mir/build/expr/as_rvalue.rs index 3cd79ee1c5..61375a3610 100644 --- a/src/librustc_mir/build/expr/as_rvalue.rs +++ b/src/librustc_mir/build/expr/as_rvalue.rs @@ -185,6 +185,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ExprKind::Adt { adt_def, variant_index, substs, fields, base } => { // see (*) above + let is_union = adt_def.is_union(); + let active_field_index = if is_union { Some(fields[0].name.index()) } else { None }; + // first process the set of fields that were provided // (evaluating them in order given by user) let fields_map: FnvHashMap<_, _> = @@ -208,11 +211,11 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { }) .collect() } else { - field_names.iter().map(|n| fields_map[n].clone()).collect() + field_names.iter().filter_map(|n| fields_map.get(n).cloned()).collect() }; - block.and(Rvalue::Aggregate(AggregateKind::Adt(adt_def, variant_index, substs), - fields)) + let adt = AggregateKind::Adt(adt_def, variant_index, substs, active_field_index); + block.and(Rvalue::Aggregate(adt, fields)) } ExprKind::Assign { .. } | ExprKind::AssignOp { .. } => { diff --git a/src/librustc_mir/build/matches/mod.rs b/src/librustc_mir/build/matches/mod.rs index ec390704d0..1b64b4d0b5 100644 --- a/src/librustc_mir/build/matches/mod.rs +++ b/src/librustc_mir/build/matches/mod.rs @@ -278,7 +278,7 @@ struct Binding<'tcx> { var_id: NodeId, var_ty: Ty<'tcx>, mutability: Mutability, - binding_mode: BindingMode, + binding_mode: BindingMode<'tcx>, } #[derive(Clone, Debug)] diff --git a/src/librustc_mir/build/matches/test.rs b/src/librustc_mir/build/matches/test.rs index 8c9ed53c8a..bf43bfb326 100644 --- a/src/librustc_mir/build/matches/test.rs +++ b/src/librustc_mir/build/matches/test.rs @@ -293,7 +293,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { assert!(ty.is_slice()); let eq_def_id = self.hir.tcx().lang_items.eq_trait().unwrap(); let ty = mt.ty; - let (mty, method) = self.hir.trait_method(eq_def_id, "eq", ty, vec![ty]); + let (mty, method) = self.hir.trait_method(eq_def_id, "eq", ty, &[ty]); let bool_ty = self.hir.bool_ty(); let eq_result = self.temp(bool_ty); diff --git a/src/librustc_mir/build/mod.rs b/src/librustc_mir/build/mod.rs index 26eb782a73..23591f05b8 100644 --- a/src/librustc_mir/build/mod.rs +++ b/src/librustc_mir/build/mod.rs @@ -101,16 +101,6 @@ pub struct ScopeAuxiliary { pub postdoms: Vec, } -#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] -pub struct Location { - /// the location is within this block - pub block: BasicBlock, - - /// the location is the start of the this statement; or, if `statement_index` - /// == num-statements, then the start of the terminator. - pub statement_index: usize, -} - pub type ScopeAuxiliaryVec = IndexVec; /////////////////////////////////////////////////////////////////////////// @@ -207,8 +197,9 @@ pub fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>, // Gather the upvars of a closure, if any. 
let upvar_decls: Vec<_> = tcx.with_freevars(fn_id, |freevars| { freevars.iter().map(|fv| { + let var_id = tcx.map.as_local_node_id(fv.def.def_id()).unwrap(); let by_ref = tcx.upvar_capture(ty::UpvarId { - var_id: fv.def.var_id(), + var_id: var_id, closure_expr_id: fn_id }).map_or(false, |capture| match capture { ty::UpvarCapture::ByValue => false, @@ -218,7 +209,7 @@ pub fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>, debug_name: keywords::Invalid.name(), by_ref: by_ref }; - if let Some(hir::map::NodeLocal(pat)) = tcx.map.find(fv.def.var_id()) { + if let Some(hir::map::NodeLocal(pat)) = tcx.map.find(var_id) { if let hir::PatKind::Binding(_, ref ident, _) = pat.node { decl.debug_name = ident.node; } diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index dc1d63a291..0b33e5a145 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -89,13 +89,15 @@ should go to. use build::{BlockAnd, BlockAndExtension, Builder, CFG, ScopeAuxiliary, ScopeId}; use rustc::middle::region::{CodeExtent, CodeExtentData}; use rustc::middle::lang_items; -use rustc::ty::subst::{Substs, Subst, VecPerParamSpace}; +use rustc::ty::subst::{Kind, Substs, Subst}; use rustc::ty::{Ty, TyCtxt}; use rustc::mir::repr::*; use syntax_pos::Span; use rustc_data_structures::indexed_vec::Idx; use rustc_data_structures::fnv::FnvHashMap; +use std::iter; + pub struct Scope<'tcx> { /// the scope-id within the scope_auxiliary id: ScopeId, @@ -789,10 +791,7 @@ fn build_free<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, -> TerminatorKind<'tcx> { let free_func = tcx.lang_items.require(lang_items::BoxFreeFnLangItem) .unwrap_or_else(|e| tcx.sess.fatal(&e)); - let substs = tcx.mk_substs(Substs::new( - VecPerParamSpace::new(vec![], vec![], vec![data.item_ty]), - VecPerParamSpace::new(vec![], vec![], vec![]) - )); + let substs = Substs::new(tcx, iter::once(Kind::from(data.item_ty))); TerminatorKind::Call { func: Operand::Constant(Constant { span: data.span, diff --git a/src/librustc_mir/def_use.rs b/src/librustc_mir/def_use.rs new file mode 100644 index 0000000000..11b4441c84 --- /dev/null +++ b/src/librustc_mir/def_use.rs @@ -0,0 +1,197 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Def-use analysis. 
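Editor's note: the new `def_use.rs` module added below records, for every MIR local, each definition and use together with its `Location` and `LvalueContext`. As a mental model only, here is a toy def-use pass over a made-up straight-line IR; the types and the single-block shape are simplifications invented for this example, not the MIR visitor machinery the real module uses.

```rust
use std::collections::HashMap;

/// A toy "MIR": each statement assigns one local from another local or a constant.
#[derive(Clone, Copy, Debug)]
enum Operand {
    Local(usize),
    Const(i64),
}

#[derive(Clone, Copy, Debug)]
struct Statement {
    dest: usize,  // local being defined
    src: Operand, // operand being read
}

#[derive(Default, Debug)]
struct Info {
    defs: Vec<usize>, // statement indices that write the local
    uses: Vec<usize>, // statement indices that read the local
}

/// One pass over the body, recording where every local is defined and used.
fn def_use(body: &[Statement]) -> HashMap<usize, Info> {
    let mut info: HashMap<usize, Info> = HashMap::new();
    for (loc, stmt) in body.iter().enumerate() {
        info.entry(stmt.dest).or_insert_with(Info::default).defs.push(loc);
        if let Operand::Local(src) = stmt.src {
            info.entry(src).or_insert_with(Info::default).uses.push(loc);
        }
    }
    info
}

fn main() {
    // _0 = 1; _1 = _0; _2 = _1;
    let body = [
        Statement { dest: 0, src: Operand::Const(1) },
        Statement { dest: 1, src: Operand::Local(0) },
        Statement { dest: 2, src: Operand::Local(1) },
    ];
    let info = def_use(&body);
    assert_eq!(info[&1].defs, vec![1]); // _1 is defined once, at statement 1
    assert_eq!(info[&1].uses, vec![2]); // and used once, at statement 2
}
```

The real analysis additionally classifies each appearance by context (mutating vs. non-mutating, drops, storage markers), which is what the copy-propagation pass introduced further down keys on.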
+ +use rustc::mir::repr::{Local, Location, Lvalue, Mir}; +use rustc::mir::visit::{LvalueContext, MutVisitor, Visitor}; +use rustc_data_structures::indexed_vec::{Idx, IndexVec}; +use std::marker::PhantomData; +use std::mem; + +pub struct DefUseAnalysis<'tcx> { + info: IndexVec>, + mir_summary: MirSummary, +} + +#[derive(Clone)] +pub struct Info<'tcx> { + pub defs_and_uses: Vec>, +} + +#[derive(Clone)] +pub struct Use<'tcx> { + pub context: LvalueContext<'tcx>, + pub location: Location, +} + +impl<'tcx> DefUseAnalysis<'tcx> { + pub fn new(mir: &Mir<'tcx>) -> DefUseAnalysis<'tcx> { + DefUseAnalysis { + info: IndexVec::from_elem_n(Info::new(), mir.count_locals()), + mir_summary: MirSummary::new(mir), + } + } + + pub fn analyze(&mut self, mir: &Mir<'tcx>) { + let mut finder = DefUseFinder { + info: mem::replace(&mut self.info, IndexVec::new()), + mir_summary: self.mir_summary, + }; + finder.visit_mir(mir); + self.info = finder.info + } + + pub fn local_info(&self, local: Local) -> &Info<'tcx> { + &self.info[local] + } + + pub fn local_info_mut(&mut self, local: Local) -> &mut Info<'tcx> { + &mut self.info[local] + } + + fn mutate_defs_and_uses(&self, local: Local, mir: &mut Mir<'tcx>, mut callback: F) + where F: for<'a> FnMut(&'a mut Lvalue<'tcx>, + LvalueContext<'tcx>, + Location) { + for lvalue_use in &self.info[local].defs_and_uses { + MutateUseVisitor::new(local, + &mut callback, + self.mir_summary, + mir).visit_location(mir, lvalue_use.location) + } + } + + /// FIXME(pcwalton): This should update the def-use chains. + pub fn replace_all_defs_and_uses_with(&self, + local: Local, + mir: &mut Mir<'tcx>, + new_lvalue: Lvalue<'tcx>) { + self.mutate_defs_and_uses(local, mir, |lvalue, _, _| *lvalue = new_lvalue.clone()) + } +} + +struct DefUseFinder<'tcx> { + info: IndexVec>, + mir_summary: MirSummary, +} + +impl<'tcx> DefUseFinder<'tcx> { + fn lvalue_mut_info(&mut self, lvalue: &Lvalue<'tcx>) -> Option<&mut Info<'tcx>> { + let info = &mut self.info; + self.mir_summary.local_index(lvalue).map(move |local| &mut info[local]) + } +} + +impl<'tcx> Visitor<'tcx> for DefUseFinder<'tcx> { + fn visit_lvalue(&mut self, + lvalue: &Lvalue<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { + if let Some(ref mut info) = self.lvalue_mut_info(lvalue) { + info.defs_and_uses.push(Use { + context: context, + location: location, + }) + } + self.super_lvalue(lvalue, context, location) + } +} + +impl<'tcx> Info<'tcx> { + fn new() -> Info<'tcx> { + Info { + defs_and_uses: vec![], + } + } + + pub fn def_count(&self) -> usize { + self.defs_and_uses.iter().filter(|lvalue_use| lvalue_use.context.is_mutating_use()).count() + } + + pub fn def_count_not_including_drop(&self) -> usize { + self.defs_and_uses.iter().filter(|lvalue_use| { + lvalue_use.context.is_mutating_use() && !lvalue_use.context.is_drop() + }).count() + } + + pub fn use_count(&self) -> usize { + self.defs_and_uses.iter().filter(|lvalue_use| { + lvalue_use.context.is_nonmutating_use() + }).count() + } +} + +struct MutateUseVisitor<'tcx, F> { + query: Local, + callback: F, + mir_summary: MirSummary, + phantom: PhantomData<&'tcx ()>, +} + +impl<'tcx, F> MutateUseVisitor<'tcx, F> { + fn new(query: Local, callback: F, mir_summary: MirSummary, _: &Mir<'tcx>) + -> MutateUseVisitor<'tcx, F> + where F: for<'a> FnMut(&'a mut Lvalue<'tcx>, LvalueContext<'tcx>, Location) { + MutateUseVisitor { + query: query, + callback: callback, + mir_summary: mir_summary, + phantom: PhantomData, + } + } +} + +impl<'tcx, F> MutVisitor<'tcx> for MutateUseVisitor<'tcx, F> + 
where F: for<'a> FnMut(&'a mut Lvalue<'tcx>, LvalueContext<'tcx>, Location) { + fn visit_lvalue(&mut self, + lvalue: &mut Lvalue<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { + if self.mir_summary.local_index(lvalue) == Some(self.query) { + (self.callback)(lvalue, context, location) + } + self.super_lvalue(lvalue, context, location) + } +} + +/// A small structure that enables various metadata of the MIR to be queried +/// without a reference to the MIR itself. +#[derive(Clone, Copy)] +pub struct MirSummary { + arg_count: usize, + var_count: usize, + temp_count: usize, +} + +impl MirSummary { + pub fn new(mir: &Mir) -> MirSummary { + MirSummary { + arg_count: mir.arg_decls.len(), + var_count: mir.var_decls.len(), + temp_count: mir.temp_decls.len(), + } + } + + pub fn local_index<'tcx>(&self, lvalue: &Lvalue<'tcx>) -> Option { + match *lvalue { + Lvalue::Arg(arg) => Some(Local::new(arg.index())), + Lvalue::Var(var) => Some(Local::new(var.index() + self.arg_count)), + Lvalue::Temp(temp) => { + Some(Local::new(temp.index() + self.arg_count + self.var_count)) + } + Lvalue::ReturnPointer => { + Some(Local::new(self.arg_count + self.var_count + self.temp_count)) + } + _ => None, + } + } +} + diff --git a/src/librustc_mir/diagnostics.rs b/src/librustc_mir/diagnostics.rs index 4a731d898a..eb16812af9 100644 --- a/src/librustc_mir/diagnostics.rs +++ b/src/librustc_mir/diagnostics.rs @@ -18,7 +18,7 @@ for the entire lifetime of a program. Creating a boxed value allocates memory on the heap at runtime, and therefore cannot be done at compile time. Erroneous code example: -```compile_fail +```compile_fail,E0010 #![feature(box_syntax)] const CON : Box = box 0; @@ -30,7 +30,7 @@ Static and const variables can refer to other const variables. But a const variable cannot refer to a static variable. For example, `Y` cannot refer to `X` here: -```compile_fail +```compile_fail,E0013 static X: i32 = 42; const Y: i32 = X; ``` @@ -66,7 +66,7 @@ E0016: r##" Blocks in constants may only contain items (such as constant, function definition, etc...) and a tail expression. Erroneous code example: -```compile_fail +```compile_fail,E0016 const FOO: i32 = { let x = 0; x }; // 'x' isn't an item! ``` @@ -81,7 +81,7 @@ E0017: r##" References in statics and constants may only refer to immutable values. Erroneous code example: -```compile_fail +```compile_fail,E0017 static X: i32 = 1; const C: i32 = 2; @@ -107,7 +107,7 @@ vary. For example, if you write: -```compile_fail +```compile_fail,E0018 static MY_STATIC: u32 = 42; static MY_STATIC_ADDR: usize = &MY_STATIC as *const _ as usize; static WHAT: usize = (MY_STATIC_ADDR^17) + MY_STATIC_ADDR; @@ -152,7 +152,7 @@ impl Test { fn main() { const FOO: Test = Test::V1; - const A: i32 = FOO.test(); // You can't call Test::func() here ! + const A: i32 = FOO.test(); // You can't call Test::func() here! } ``` @@ -214,14 +214,13 @@ static B: &'static u32 = &A; // ok! ``` "##, - E0395: r##" The value assigned to a constant scalar must be known at compile time, which is not the case when comparing raw pointers. Erroneous code example: -```compile_fail +```compile_fail,E0395 static FOO: i32 = 42; static BAR: i32 = 42; @@ -250,7 +249,7 @@ The value behind a raw pointer can't be determined at compile-time (or even link-time), which means it can't be used in a constant expression. 
Erroneous code example: -```compile_fail +```compile_fail,E0396 const REG_ADDR: *const u8 = 0x5f3759df as *const u8; const VALUE: u8 = unsafe { *REG_ADDR }; @@ -272,7 +271,7 @@ E0492: r##" A borrow of a constant containing interior mutability was attempted. Erroneous code example: -```compile_fail +```compile_fail,E0492 use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; const A: AtomicUsize = ATOMIC_USIZE_INIT; @@ -299,7 +298,7 @@ static B: &'static AtomicUsize = &A; // ok! You can also have this error while using a cell type: -```compile_fail +```compile_fail,E0492 #![feature(const_fn)] use std::cell::Cell; @@ -351,7 +350,7 @@ E0493: r##" A type with a destructor was assigned to an invalid type of variable. Erroneous code example: -```compile_fail +```compile_fail,E0493 struct Foo { a: u32 } @@ -374,7 +373,7 @@ E0494: r##" A reference of an interior static was assigned to another const/static. Erroneous code example: -```compile_fail +```compile_fail,E0494 struct Foo { a: u32 } diff --git a/src/librustc_mir/hair/cx/expr.rs b/src/librustc_mir/hair/cx/expr.rs index 605798ad44..ea7bc99dd0 100644 --- a/src/librustc_mir/hair/cx/expr.rs +++ b/src/librustc_mir/hair/cx/expr.rs @@ -19,7 +19,7 @@ use rustc::hir::def::Def; use rustc::middle::const_val::ConstVal; use rustc_const_eval as const_eval; use rustc::middle::region::CodeExtent; -use rustc::ty::{self, VariantDef, Ty}; +use rustc::ty::{self, AdtKind, VariantDef, Ty}; use rustc::ty::cast::CastKind as TyCastKind; use rustc::mir::repr::*; use rustc::hir; @@ -108,7 +108,7 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { region, ty::TypeAndMut { ty: expr.ty, mutbl: mutbl }), span: expr.span, kind: ExprKind::Borrow { - region: *region, + region: region, borrow_kind: to_borrow_kind(mutbl), arg: expr.to_ref() } @@ -137,7 +137,7 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { ty: adjusted_ty, span: self.span, kind: ExprKind::Borrow { - region: *r, + region: r, borrow_kind: to_borrow_kind(m), arg: expr.to_ref(), }, @@ -154,7 +154,7 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { ty: cx.tcx.mk_ref(region, ty::TypeAndMut { ty: expr.ty, mutbl: m }), span: self.span, kind: ExprKind::Borrow { - region: *region, + region: region, borrow_kind: to_borrow_kind(m), arg: expr.to_ref(), }, @@ -217,7 +217,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, let kind = match expr.node { // Here comes the interesting stuff: - hir::ExprMethodCall(_, _, ref args) => { + hir::ExprMethodCall(.., ref args) => { // Rewrite a.b(c) into UFCS form like Trait::b(a, c) let expr = method_callee(cx, expr, ty::MethodCall::expr(expr.id)); let args = args.iter() @@ -242,7 +242,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, let method = method_callee(cx, expr, ty::MethodCall::expr(expr.id)); let sig = match method.ty.sty { - ty::TyFnDef(_, _, fn_ty) => &fn_ty.sig, + ty::TyFnDef(.., fn_ty) => &fn_ty.sig, _ => span_bug!(expr.span, "type of method is not an fn") }; @@ -271,7 +271,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, // Tuple-like ADTs are represented as ExprCall. We convert them here. expr_ty.ty_adt_def().and_then(|adt_def|{ match cx.tcx.expect_def(fun.id) { - Def::Variant(_, variant_id) => { + Def::Variant(variant_id) => { Some((adt_def, adt_def.variant_index_with_id(variant_id))) }, Def::Struct(..) 
=> { @@ -310,7 +310,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, _ => span_bug!(expr.span, "type of & not region"), }; ExprKind::Borrow { - region: *region, + region: region, borrow_kind: to_borrow_kind(mutbl), arg: expr.to_ref(), } @@ -459,48 +459,49 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, hir::ExprStruct(_, ref fields, ref base) => { match expr_ty.sty { - ty::TyStruct(adt, substs) => { - let field_refs = field_refs(&adt.variants[0], fields); - ExprKind::Adt { - adt_def: adt, - variant_index: 0, - substs: substs, - fields: field_refs, - base: base.as_ref().map(|base| { - FruInfo { - base: base.to_ref(), - field_types: cx.tcx.tables - .borrow() - .fru_field_types[&expr.id] - .clone() - } - }) + ty::TyAdt(adt, substs) => match adt.adt_kind() { + AdtKind::Struct | AdtKind::Union => { + let field_refs = field_refs(&adt.variants[0], fields); + ExprKind::Adt { + adt_def: adt, + variant_index: 0, + substs: substs, + fields: field_refs, + base: base.as_ref().map(|base| { + FruInfo { + base: base.to_ref(), + field_types: cx.tcx.tables + .borrow() + .fru_field_types[&expr.id] + .clone() + } + }) + } } - } - ty::TyEnum(adt, substs) => { - match cx.tcx.expect_def(expr.id) { - Def::Variant(enum_id, variant_id) => { - debug_assert!(adt.did == enum_id); - assert!(base.is_none()); - - let index = adt.variant_index_with_id(variant_id); - let field_refs = field_refs(&adt.variants[index], fields); - ExprKind::Adt { - adt_def: adt, - variant_index: index, - substs: substs, - fields: field_refs, - base: None + AdtKind::Enum => { + match cx.tcx.expect_def(expr.id) { + Def::Variant(variant_id) => { + assert!(base.is_none()); + + let index = adt.variant_index_with_id(variant_id); + let field_refs = field_refs(&adt.variants[index], fields); + ExprKind::Adt { + adt_def: adt, + variant_index: index, + substs: substs, + fields: field_refs, + base: None + } + } + ref def => { + span_bug!( + expr.span, + "unexpected def: {:?}", + def); } - } - ref def => { - span_bug!( - expr.span, - "unexpected def: {:?}", - def); } } - } + }, _ => { span_bug!( expr.span, @@ -579,13 +580,10 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, body: block::to_expr_ref(cx, body) }, hir::ExprField(ref source, name) => { let index = match cx.tcx.expr_ty_adjusted(source).sty { - ty::TyStruct(adt_def, _) => + ty::TyAdt(adt_def, _) => adt_def.variants[0].index_of_field_named(name.node), ref ty => - span_bug!( - expr.span, - "field of non-struct: {:?}", - ty), + span_bug!(expr.span, "field of non-ADT: {:?}", ty), }; let index = index.unwrap_or_else(|| { span_bug!( @@ -680,7 +678,7 @@ fn convert_path_expr<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, ty::TyFnDef(..) => def_id, // A unit struct which is used as a value. We return a completely different ExprKind // here to account for this special case. - ty::TyStruct(adt_def, substs) => return ExprKind::Adt { + ty::TyAdt(adt_def, substs) => return ExprKind::Adt { adt_def: adt_def, variant_index: 0, substs: substs, @@ -689,13 +687,12 @@ fn convert_path_expr<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, }, ref sty => bug!("unexpected sty: {:?}", sty) }, - Def::Variant(enum_id, variant_id) => match cx.tcx.node_id_to_type(expr.id).sty { + Def::Variant(variant_id) => match cx.tcx.node_id_to_type(expr.id).sty { // A variant constructor. Should only be reached if not called in the same // expression. ty::TyFnDef(..) => variant_id, // A unit variant, similar special case to the struct case above. 
- ty::TyEnum(adt_def, substs) => { - debug_assert!(adt_def.did == enum_id); + ty::TyAdt(adt_def, substs) => { let index = adt_def.variant_index_with_id(variant_id); return ExprKind::Adt { adt_def: adt_def, @@ -730,20 +727,22 @@ fn convert_var<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, let temp_lifetime = cx.tcx.region_maps.temporary_scope(expr.id); match def { - Def::Local(_, node_id) => { + Def::Local(def_id) => { + let node_id = cx.tcx.map.as_local_node_id(def_id).unwrap(); ExprKind::VarRef { id: node_id, } } - Def::Upvar(_, id_var, index, closure_expr_id) => { + Def::Upvar(def_id, index, closure_expr_id) => { + let id_var = cx.tcx.map.as_local_node_id(def_id).unwrap(); debug!("convert_var(upvar({:?}, {:?}, {:?}))", id_var, index, closure_expr_id); let var_ty = cx.tcx.node_id_to_type(id_var); let body_id = match cx.tcx.map.find(closure_expr_id) { Some(map::NodeExpr(expr)) => { match expr.node { - hir::ExprClosure(_, _, ref body, _) => body.id, + hir::ExprClosure(.., ref body, _) => body.id, _ => { span_bug!(expr.span, "closure expr is not a closure expr"); } @@ -842,8 +841,7 @@ fn convert_var<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, ExprKind::Deref { arg: Expr { temp_lifetime: temp_lifetime, - ty: cx.tcx.mk_ref( - cx.tcx.mk_region(borrow.region), + ty: cx.tcx.mk_ref(borrow.region, ty::TypeAndMut { ty: var_ty, mutbl: borrow.kind.to_mutbl_lossy() @@ -907,8 +905,7 @@ fn overloaded_operator<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, } PassArgs::ByRef => { - let scope = cx.tcx.region_maps.node_extent(expr.id); - let region = cx.tcx.mk_region(ty::ReScope(scope)); + let region = cx.tcx.node_scope_region(expr.id); let temp_lifetime = cx.tcx.region_maps.temporary_scope(expr.id); argrefs.extend( args.iter() @@ -922,7 +919,7 @@ fn overloaded_operator<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, temp_lifetime: temp_lifetime, ty: adjusted_ty, span: expr.span, - kind: ExprKind::Borrow { region: *region, + kind: ExprKind::Borrow { region: region, borrow_kind: BorrowKind::Shared, arg: arg.to_ref() } }.to_ref() @@ -977,7 +974,7 @@ fn capture_freevar<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, freevar: &hir::Freevar, freevar_ty: Ty<'tcx>) -> ExprRef<'tcx> { - let id_var = freevar.def.var_id(); + let id_var = cx.tcx.map.as_local_node_id(freevar.def.def_id()).unwrap(); let upvar_id = ty::UpvarId { var_id: id_var, closure_expr_id: closure_expr.id, diff --git a/src/librustc_mir/hair/cx/mod.rs b/src/librustc_mir/hair/cx/mod.rs index df1fec7593..8dd33ad2f9 100644 --- a/src/librustc_mir/hair/cx/mod.rs +++ b/src/librustc_mir/hair/cx/mod.rs @@ -32,7 +32,6 @@ use rustc::ty::{self, Ty, TyCtxt}; use syntax::parse::token; use rustc::hir; use rustc_const_math::{ConstInt, ConstUsize}; -use syntax::attr::AttrMetaMethods; #[derive(Copy, Clone)] pub struct Cx<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { @@ -54,8 +53,8 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { MirSource::Fn(id) => { let fn_like = FnLikeNode::from_node(infcx.tcx.map.get(id)); match fn_like.map(|f| f.kind()) { - Some(FnKind::ItemFn(_, _, _, c, _, _, _)) => c, - Some(FnKind::Method(_, m, _, _)) => m.constness, + Some(FnKind::ItemFn(_, _, _, c, ..)) => c, + Some(FnKind::Method(_, m, ..)) => m.constness, _ => hir::Constness::NotConst } } @@ -144,19 +143,19 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { trait_def_id: DefId, method_name: &str, self_ty: Ty<'tcx>, - params: Vec>) + params: &[Ty<'tcx>]) -> (Ty<'tcx>, Literal<'tcx>) { let method_name = token::intern(method_name); - let substs = Substs::new_trait(params, vec![], self_ty); + let substs = 
Substs::new_trait(self.tcx, self_ty, params); for trait_item in self.tcx.trait_items(trait_def_id).iter() { match *trait_item { ty::ImplOrTraitItem::MethodTraitItem(ref method) => { if method.name == method_name { let method_ty = self.tcx.lookup_item_type(method.def_id); - let method_ty = method_ty.ty.subst(self.tcx, &substs); + let method_ty = method_ty.ty.subst(self.tcx, substs); return (method_ty, Literal::Item { def_id: method.def_id, - substs: self.tcx.mk_substs(substs), + substs: substs, }); } } diff --git a/src/librustc_mir/hair/cx/pattern.rs b/src/librustc_mir/hair/cx/pattern.rs index c54c8bfb59..7b8446b184 100644 --- a/src/librustc_mir/hair/cx/pattern.rs +++ b/src/librustc_mir/hair/cx/pattern.rs @@ -158,10 +158,11 @@ impl<'patcx, 'cx, 'gcx, 'tcx> PatCx<'patcx, 'cx, 'gcx, 'tcx> { } PatKind::Binding(bm, ref ident, ref sub) => { - let id = self.cx.tcx.expect_def(pat.id).var_id(); + let def_id = self.cx.tcx.expect_def(pat.id).def_id(); + let id = self.cx.tcx.map.as_local_node_id(def_id).unwrap(); let var_ty = self.cx.tcx.node_id_to_type(pat.id); let region = match var_ty.sty { - ty::TyRef(&r, _) => Some(r), + ty::TyRef(r, _) => Some(r), _ => None, }; let (mutability, mode) = match bm { @@ -198,8 +199,8 @@ impl<'patcx, 'cx, 'gcx, 'tcx> PatCx<'patcx, 'cx, 'gcx, 'tcx> { PatKind::TupleStruct(_, ref subpatterns, ddpos) => { let pat_ty = self.cx.tcx.node_id_to_type(pat.id); let adt_def = match pat_ty.sty { - ty::TyStruct(adt_def, _) | ty::TyEnum(adt_def, _) => adt_def, - _ => span_bug!(pat.span, "tuple struct pattern not applied to struct or enum"), + ty::TyAdt(adt_def, _) => adt_def, + _ => span_bug!(pat.span, "tuple struct pattern not applied to an ADT"), }; let variant_def = adt_def.variant_of_def(self.cx.tcx.expect_def(pat.id)); @@ -217,11 +218,11 @@ impl<'patcx, 'cx, 'gcx, 'tcx> PatCx<'patcx, 'cx, 'gcx, 'tcx> { PatKind::Struct(_, ref fields, _) => { let pat_ty = self.cx.tcx.node_id_to_type(pat.id); let adt_def = match pat_ty.sty { - ty::TyStruct(adt_def, _) | ty::TyEnum(adt_def, _) => adt_def, + ty::TyAdt(adt_def, _) => adt_def, _ => { span_bug!( pat.span, - "struct pattern not applied to struct or enum"); + "struct pattern not applied to an ADT"); } }; let variant_def = adt_def.variant_of_def(self.cx.tcx.expect_def(pat.id)); @@ -300,7 +301,8 @@ impl<'patcx, 'cx, 'gcx, 'tcx> PatCx<'patcx, 'cx, 'gcx, 'tcx> { subpatterns: Vec>) -> PatternKind<'tcx> { match self.cx.tcx.expect_def(pat.id) { - Def::Variant(enum_id, variant_id) => { + Def::Variant(variant_id) => { + let enum_id = self.cx.tcx.parent_def_id(variant_id).unwrap(); let adt_def = self.cx.tcx.lookup_adt_def(enum_id); if adt_def.variants.len() > 1 { PatternKind::Variant { @@ -313,7 +315,8 @@ impl<'patcx, 'cx, 'gcx, 'tcx> PatCx<'patcx, 'cx, 'gcx, 'tcx> { } } - Def::Struct(..) | Def::TyAlias(..) | Def::AssociatedTy(..) => { + Def::Struct(..) | Def::Union(..) | + Def::TyAlias(..) | Def::AssociatedTy(..) 
=> { PatternKind::Leaf { subpatterns: subpatterns } } diff --git a/src/librustc_mir/hair/mod.rs b/src/librustc_mir/hair/mod.rs index cc59346487..59137e2bcd 100644 --- a/src/librustc_mir/hair/mod.rs +++ b/src/librustc_mir/hair/mod.rs @@ -196,7 +196,7 @@ pub enum ExprKind<'tcx> { id: DefId, }, Borrow { - region: Region, + region: &'tcx Region, borrow_kind: BorrowKind, arg: ExprRef<'tcx>, }, @@ -287,7 +287,7 @@ pub enum PatternKind<'tcx> { Binding { mutability: Mutability, name: ast::Name, - mode: BindingMode, + mode: BindingMode<'tcx>, var: ast::NodeId, ty: Ty<'tcx>, subpattern: Option>, @@ -335,9 +335,9 @@ pub enum PatternKind<'tcx> { } #[derive(Copy, Clone, Debug)] -pub enum BindingMode { +pub enum BindingMode<'tcx> { ByValue, - ByRef(Region, BorrowKind), + ByRef(&'tcx Region, BorrowKind), } #[derive(Clone, Debug)] diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs index 3d01d49c53..02f15602d7 100644 --- a/src/librustc_mir/lib.rs +++ b/src/librustc_mir/lib.rs @@ -22,10 +22,11 @@ Rust MIR: a lowered representation of Rust. Also: an experiment! #![feature(associated_consts)] #![feature(box_patterns)] +#![feature(dotdot_in_tuple_patterns)] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] #![feature(staged_api)] -#![feature(question_mark)] +#![cfg_attr(stage0, feature(question_mark))] #[macro_use] extern crate log; extern crate graphviz as dot; @@ -45,8 +46,10 @@ extern crate rustc_const_eval; pub mod diagnostics; pub mod build; +pub mod def_use; pub mod graphviz; mod hair; pub mod mir_map; pub mod pretty; pub mod transform; + diff --git a/src/librustc_mir/pretty.rs b/src/librustc_mir/pretty.rs index c58491096b..01e2c6308b 100644 --- a/src/librustc_mir/pretty.rs +++ b/src/librustc_mir/pretty.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use build::{Location, ScopeAuxiliaryVec, ScopeId}; +use build::{ScopeAuxiliaryVec, ScopeId}; use rustc::hir; use rustc::hir::def_id::DefId; use rustc::mir::repr::*; @@ -77,12 +77,12 @@ pub fn dump_mir<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, node_id, promotion_id, pass_name, disambiguator); file_path.push(&file_name); let _ = fs::File::create(&file_path).and_then(|mut file| { - try!(writeln!(file, "// MIR for `{}`", node_path)); - try!(writeln!(file, "// node_id = {}", node_id)); - try!(writeln!(file, "// pass_name = {}", pass_name)); - try!(writeln!(file, "// disambiguator = {}", disambiguator)); - try!(writeln!(file, "")); - try!(write_mir_fn(tcx, src, mir, &mut file, auxiliary)); + writeln!(file, "// MIR for `{}`", node_path)?; + writeln!(file, "// node_id = {}", node_id)?; + writeln!(file, "// pass_name = {}", pass_name)?; + writeln!(file, "// disambiguator = {}", disambiguator)?; + writeln!(file, "")?; + write_mir_fn(tcx, src, mir, &mut file, auxiliary)?; Ok(()) }); } diff --git a/src/librustc_mir/transform/copy_prop.rs b/src/librustc_mir/transform/copy_prop.rs new file mode 100644 index 0000000000..79fd16012d --- /dev/null +++ b/src/librustc_mir/transform/copy_prop.rs @@ -0,0 +1,334 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Trivial copy propagation pass. +//! +//! 
This uses def-use analysis to remove values that have exactly one def and one use, which must +//! be an assignment. +//! +//! To give an example, we look for patterns that look like: +//! +//! DEST = SRC +//! ... +//! USE(DEST) +//! +//! where `DEST` and `SRC` are both locals of some form. We replace that with: +//! +//! NOP +//! ... +//! USE(SRC) +//! +//! The assignment `DEST = SRC` must be (a) the only mutation of `DEST` and (b) the only +//! (non-mutating) use of `SRC`. These restrictions are conservative and may be relaxed in the +//! future. + +use def_use::{DefUseAnalysis, MirSummary}; +use rustc::mir::repr::{Constant, Local, Location, Lvalue, Mir, Operand, Rvalue, StatementKind}; +use rustc::mir::transform::{MirPass, MirSource, Pass}; +use rustc::mir::visit::MutVisitor; +use rustc::ty::TyCtxt; +use rustc_data_structures::indexed_vec::Idx; +use transform::qualify_consts; + +pub struct CopyPropagation; + +impl Pass for CopyPropagation {} + +impl<'tcx> MirPass<'tcx> for CopyPropagation { + fn run_pass<'a>(&mut self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + source: MirSource, + mir: &mut Mir<'tcx>) { + match source { + MirSource::Const(_) => { + // Don't run on constants, because constant qualification might reject the + // optimized IR. + return + } + MirSource::Static(..) | MirSource::Promoted(..) => { + // Don't run on statics and promoted statics, because trans might not be able to + // evaluate the optimized IR. + return + } + MirSource::Fn(function_node_id) => { + if qualify_consts::is_const_fn(tcx, tcx.map.local_def_id(function_node_id)) { + // Don't run on const functions, as, again, trans might not be able to evaluate + // the optimized IR. + return + } + } + } + + // We only run when the MIR optimization level is at least 1. This avoids messing up debug + // info. + match tcx.sess.opts.debugging_opts.mir_opt_level { + Some(0) | None => return, + _ => {} + } + + loop { + let mut def_use_analysis = DefUseAnalysis::new(mir); + def_use_analysis.analyze(mir); + + let mut changed = false; + for dest_local_index in 0..mir.count_locals() { + let dest_local = Local::new(dest_local_index); + debug!("Considering destination local: {}", mir.format_local(dest_local)); + + let action; + let location; + { + // The destination must have exactly one def. + let dest_use_info = def_use_analysis.local_info(dest_local); + let dest_def_count = dest_use_info.def_count_not_including_drop(); + if dest_def_count == 0 { + debug!(" Can't copy-propagate local: dest {} undefined", + mir.format_local(dest_local)); + continue + } + if dest_def_count > 1 { + debug!(" Can't copy-propagate local: dest {} defined {} times", + mir.format_local(dest_local), + dest_use_info.def_count()); + continue + } + if dest_use_info.use_count() == 0 { + debug!(" Can't copy-propagate local: dest {} unused", + mir.format_local(dest_local)); + continue + } + let dest_lvalue_def = dest_use_info.defs_and_uses.iter().filter(|lvalue_def| { + lvalue_def.context.is_mutating_use() && !lvalue_def.context.is_drop() + }).next().unwrap(); + location = dest_lvalue_def.location; + + let basic_block = &mir[location.block]; + let statement_index = location.statement_index; + let statement = match basic_block.statements.get(statement_index) { + Some(statement) => statement, + None => { + debug!(" Can't copy-propagate local: used in terminator"); + continue + } + }; + + // That use of the source must be an assignment. 
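Editor's note: the pass below looks for the `DEST = SRC; ...; USE(DEST)` shape described in the module comment at the top of this file and keeps re-running until nothing changes (its statement matching continues directly below). The following is a deliberately simplified sketch of that rewrite over a toy statement list; the conditions checked here are weaker than in the real pass, which also consults the source's def-use counts and skips drops, storage markers, terminators, and const contexts.

```rust
/// A toy straight-line body: each statement assigns a local from an operand,
/// reads a local (standing in for any later use), or is a no-op.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Operand {
    Local(usize),
    Const(i64),
}

#[derive(Clone, Copy, Debug, PartialEq)]
enum Stmt {
    Assign(usize, Operand), // dest = operand
    Use(usize),             // read of a local
    Nop,
}

/// One round of trivial copy propagation: when `dest = src` is the only
/// definition of `dest` and `src` is not reassigned afterwards, rewrite the
/// uses of `dest` to `src` and turn the assignment into a no-op.
fn propagate_once(body: &mut Vec<Stmt>) -> bool {
    for i in 0..body.len() {
        let (dest, src) = match body[i] {
            Stmt::Assign(dest, Operand::Local(src)) => (dest, src),
            _ => continue,
        };
        let dest_defs = body
            .iter()
            .filter(|s| matches!(**s, Stmt::Assign(d, _) if d == dest))
            .count();
        let src_reassigned = body[i + 1..]
            .iter()
            .any(|s| matches!(*s, Stmt::Assign(d, _) if d == src));
        if dest_defs != 1 || src_reassigned {
            continue;
        }
        let mut rewrote = false;
        for s in body.iter_mut() {
            if let Stmt::Use(l) = s {
                if *l == dest {
                    *l = src;
                    rewrote = true;
                }
            }
        }
        if rewrote {
            body[i] = Stmt::Nop; // the copy is now dead
            return true;
        }
    }
    false
}

fn main() {
    // _1 = _0; use(_1)   becomes   nop; use(_0)
    let mut body = vec![Stmt::Assign(1, Operand::Local(0)), Stmt::Use(1)];
    while propagate_once(&mut body) {}
    assert_eq!(body, vec![Stmt::Nop, Stmt::Use(0)]);
}
```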
+ match statement.kind { + StatementKind::Assign(ref dest_lvalue, Rvalue::Use(ref operand)) if + Some(dest_local) == mir.local_index(dest_lvalue) => { + let maybe_action = match *operand { + Operand::Consume(ref src_lvalue) => { + Action::local_copy(mir, &def_use_analysis, src_lvalue) + } + Operand::Constant(ref src_constant) => { + Action::constant(src_constant) + } + }; + match maybe_action { + Some(this_action) => action = this_action, + None => continue, + } + } + _ => { + debug!(" Can't copy-propagate local: source use is not an \ + assignment"); + continue + } + } + } + + changed = action.perform(mir, &def_use_analysis, dest_local, location) || changed; + // FIXME(pcwalton): Update the use-def chains to delete the instructions instead of + // regenerating the chains. + break + } + if !changed { + break + } + } + } +} + +enum Action<'tcx> { + PropagateLocalCopy(Local), + PropagateConstant(Constant<'tcx>), +} + +impl<'tcx> Action<'tcx> { + fn local_copy(mir: &Mir<'tcx>, def_use_analysis: &DefUseAnalysis, src_lvalue: &Lvalue<'tcx>) + -> Option> { + // The source must be a local. + let src_local = match mir.local_index(src_lvalue) { + Some(src_local) => src_local, + None => { + debug!(" Can't copy-propagate local: source is not a local"); + return None + } + }; + + // We're trying to copy propagate a local. + // There must be exactly one use of the source used in a statement (not in a terminator). + let src_use_info = def_use_analysis.local_info(src_local); + let src_use_count = src_use_info.use_count(); + if src_use_count == 0 { + debug!(" Can't copy-propagate local: no uses"); + return None + } + if src_use_count != 1 { + debug!(" Can't copy-propagate local: {} uses", src_use_info.use_count()); + return None + } + + // Verify that the source doesn't change in between. This is done conservatively for now, + // by ensuring that the source has exactly one mutation. The goal is to prevent things + // like: + // + // DEST = SRC; + // SRC = X; + // USE(DEST); + // + // From being misoptimized into: + // + // SRC = X; + // USE(SRC); + let src_def_count = src_use_info.def_count_not_including_drop(); + if src_def_count != 1 { + debug!(" Can't copy-propagate local: {} defs of src", + src_use_info.def_count_not_including_drop()); + return None + } + + Some(Action::PropagateLocalCopy(src_local)) + } + + fn constant(src_constant: &Constant<'tcx>) -> Option> { + Some(Action::PropagateConstant((*src_constant).clone())) + } + + fn perform(self, + mir: &mut Mir<'tcx>, + def_use_analysis: &DefUseAnalysis<'tcx>, + dest_local: Local, + location: Location) + -> bool { + match self { + Action::PropagateLocalCopy(src_local) => { + // Eliminate the destination and the assignment. + // + // First, remove all markers. + // + // FIXME(pcwalton): Don't do this. Merge live ranges instead. + debug!(" Replacing all uses of {} with {} (local)", + mir.format_local(dest_local), + mir.format_local(src_local)); + for lvalue_use in &def_use_analysis.local_info(dest_local).defs_and_uses { + if lvalue_use.context.is_storage_marker() { + mir.make_statement_nop(lvalue_use.location) + } + } + for lvalue_use in &def_use_analysis.local_info(src_local).defs_and_uses { + if lvalue_use.context.is_storage_marker() { + mir.make_statement_nop(lvalue_use.location) + } + } + + // Replace all uses of the destination local with the source local. + let src_lvalue = Lvalue::from_local(mir, src_local); + def_use_analysis.replace_all_defs_and_uses_with(dest_local, mir, src_lvalue); + + // Finally, zap the now-useless assignment instruction. 
+ debug!(" Deleting assignment"); + mir.make_statement_nop(location); + + true + } + Action::PropagateConstant(src_constant) => { + // First, remove all markers. + // + // FIXME(pcwalton): Don't do this. Merge live ranges instead. + debug!(" Replacing all uses of {} with {:?} (constant)", + mir.format_local(dest_local), + src_constant); + let dest_local_info = def_use_analysis.local_info(dest_local); + for lvalue_use in &dest_local_info.defs_and_uses { + if lvalue_use.context.is_storage_marker() { + mir.make_statement_nop(lvalue_use.location) + } + } + + // Replace all uses of the destination local with the constant. + let mut visitor = ConstantPropagationVisitor::new(MirSummary::new(mir), + dest_local, + src_constant); + for dest_lvalue_use in &dest_local_info.defs_and_uses { + visitor.visit_location(mir, dest_lvalue_use.location) + } + + // Zap the assignment instruction if we eliminated all the uses. We won't have been + // able to do that if the destination was used in a projection, because projections + // must have lvalues on their LHS. + let use_count = dest_local_info.use_count(); + if visitor.uses_replaced == use_count { + debug!(" {} of {} use(s) replaced; deleting assignment", + visitor.uses_replaced, + use_count); + mir.make_statement_nop(location); + true + } else if visitor.uses_replaced == 0 { + debug!(" No uses replaced; not deleting assignment"); + false + } else { + debug!(" {} of {} use(s) replaced; not deleting assignment", + visitor.uses_replaced, + use_count); + true + } + } + } + } +} + +struct ConstantPropagationVisitor<'tcx> { + dest_local: Local, + constant: Constant<'tcx>, + mir_summary: MirSummary, + uses_replaced: usize, +} + +impl<'tcx> ConstantPropagationVisitor<'tcx> { + fn new(mir_summary: MirSummary, dest_local: Local, constant: Constant<'tcx>) + -> ConstantPropagationVisitor<'tcx> { + ConstantPropagationVisitor { + dest_local: dest_local, + constant: constant, + mir_summary: mir_summary, + uses_replaced: 0, + } + } +} + +impl<'tcx> MutVisitor<'tcx> for ConstantPropagationVisitor<'tcx> { + fn visit_operand(&mut self, operand: &mut Operand<'tcx>, location: Location) { + self.super_operand(operand, location); + + match *operand { + Operand::Consume(ref lvalue) => { + if self.mir_summary.local_index(lvalue) != Some(self.dest_local) { + return + } + } + Operand::Constant(_) => return, + } + + *operand = Operand::Constant(self.constant.clone()); + self.uses_replaced += 1 + } +} + diff --git a/src/librustc_mir/transform/deaggregator.rs b/src/librustc_mir/transform/deaggregator.rs index cb3010a5cf..77af02c18c 100644 --- a/src/librustc_mir/transform/deaggregator.rs +++ b/src/librustc_mir/transform/deaggregator.rs @@ -57,7 +57,7 @@ impl<'tcx> MirPass<'tcx> for Deaggregator { _ => span_bug!(src_info.span, "expected aggregate, not {:?}", rhs), }; let (adt_def, variant, substs) = match agg_kind { - &AggregateKind::Adt(adt_def, variant, substs) => (adt_def, variant, substs), + &AggregateKind::Adt(adt_def, variant, substs, None) => (adt_def, variant, substs), _ => span_bug!(src_info.span, "expected struct, not {:?}", rhs), }; let n = bb.statements.len(); @@ -120,7 +120,7 @@ fn get_aggregate_statement_index<'a, 'tcx, 'b>(start: usize, _ => continue, }; let (adt_def, variant) = match kind { - &AggregateKind::Adt(adt_def, variant, _) => (adt_def, variant), + &AggregateKind::Adt(adt_def, variant, _, None) => (adt_def, variant), _ => continue, }; if operands.len() == 0 { diff --git a/src/librustc_mir/transform/dump_mir.rs b/src/librustc_mir/transform/dump_mir.rs index 
642adeee5c..694b017bbd 100644 --- a/src/librustc_mir/transform/dump_mir.rs +++ b/src/librustc_mir/transform/dump_mir.rs @@ -26,7 +26,7 @@ impl<'b, 'tcx> MirPass<'tcx> for Marker<'b> { } impl<'b> Pass for Marker<'b> { - fn name(&self) -> &str { self.0 } + fn name(&self) -> ::std::borrow::Cow<'static, str> { String::from(self.0).into() } } pub struct Disambiguator<'a> { @@ -58,7 +58,7 @@ impl<'tcx> MirPassHook<'tcx> for DumpMir { { pretty::dump_mir( tcx, - pass.name(), + &*pass.name(), &Disambiguator { pass: pass, is_after: is_after diff --git a/src/librustc_mir/transform/instcombine.rs b/src/librustc_mir/transform/instcombine.rs new file mode 100644 index 0000000000..a0331f03b0 --- /dev/null +++ b/src/librustc_mir/transform/instcombine.rs @@ -0,0 +1,110 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Performs various peephole optimizations. + +use rustc::mir::repr::{Location, Lvalue, Mir, Operand, ProjectionElem, Rvalue}; +use rustc::mir::transform::{MirPass, MirSource, Pass}; +use rustc::mir::visit::{MutVisitor, Visitor}; +use rustc::ty::TyCtxt; +use rustc::util::nodemap::FnvHashSet; +use std::mem; + +pub struct InstCombine { + optimizations: OptimizationList, +} + +impl InstCombine { + pub fn new() -> InstCombine { + InstCombine { + optimizations: OptimizationList::default(), + } + } +} + +impl Pass for InstCombine {} + +impl<'tcx> MirPass<'tcx> for InstCombine { + fn run_pass<'a>(&mut self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + _: MirSource, + mir: &mut Mir<'tcx>) { + // We only run when optimizing MIR (at any level). + if tcx.sess.opts.debugging_opts.mir_opt_level == Some(0) { + return + } + + // First, find optimization opportunities. This is done in a pre-pass to keep the MIR + // read-only so that we can do global analyses on the MIR in the process (e.g. + // `Lvalue::ty()`). + { + let mut optimization_finder = OptimizationFinder::new(mir, tcx); + optimization_finder.visit_mir(mir); + self.optimizations = optimization_finder.optimizations + } + + // Then carry out those optimizations. + MutVisitor::visit_mir(&mut *self, mir); + } +} + +impl<'tcx> MutVisitor<'tcx> for InstCombine { + fn visit_rvalue(&mut self, rvalue: &mut Rvalue<'tcx>, location: Location) { + if self.optimizations.and_stars.remove(&location) { + debug!("Replacing `&*`: {:?}", rvalue); + let new_lvalue = match *rvalue { + Rvalue::Ref(_, _, Lvalue::Projection(ref mut projection)) => { + mem::replace(&mut projection.base, Lvalue::ReturnPointer) + } + _ => bug!("Detected `&*` but didn't find `&*`!"), + }; + *rvalue = Rvalue::Use(Operand::Consume(new_lvalue)) + } + + self.super_rvalue(rvalue, location) + } +} + +/// Finds optimization opportunities on the MIR. 
+struct OptimizationFinder<'b, 'a, 'tcx:'a+'b> { + mir: &'b Mir<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + optimizations: OptimizationList, +} + +impl<'b, 'a, 'tcx:'b> OptimizationFinder<'b, 'a, 'tcx> { + fn new(mir: &'b Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> OptimizationFinder<'b, 'a, 'tcx> { + OptimizationFinder { + mir: mir, + tcx: tcx, + optimizations: OptimizationList::default(), + } + } +} + +impl<'b, 'a, 'tcx> Visitor<'tcx> for OptimizationFinder<'b, 'a, 'tcx> { + fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) { + if let Rvalue::Ref(_, _, Lvalue::Projection(ref projection)) = *rvalue { + if let ProjectionElem::Deref = projection.elem { + if projection.base.ty(self.mir, self.tcx).to_ty(self.tcx).is_region_ptr() { + self.optimizations.and_stars.insert(location); + } + } + } + + self.super_rvalue(rvalue, location) + } +} + +#[derive(Default)] +struct OptimizationList { + and_stars: FnvHashSet, +} + diff --git a/src/librustc_mir/transform/mod.rs b/src/librustc_mir/transform/mod.rs index c3485b8256..7bcb89b589 100644 --- a/src/librustc_mir/transform/mod.rs +++ b/src/librustc_mir/transform/mod.rs @@ -18,3 +18,5 @@ pub mod promote_consts; pub mod qualify_consts; pub mod dump_mir; pub mod deaggregator; +pub mod instcombine; +pub mod copy_prop; diff --git a/src/librustc_mir/transform/no_landing_pads.rs b/src/librustc_mir/transform/no_landing_pads.rs index 818f060ed4..32fddd293c 100644 --- a/src/librustc_mir/transform/no_landing_pads.rs +++ b/src/librustc_mir/transform/no_landing_pads.rs @@ -19,7 +19,10 @@ use rustc::mir::transform::{Pass, MirPass, MirSource}; pub struct NoLandingPads; impl<'tcx> MutVisitor<'tcx> for NoLandingPads { - fn visit_terminator(&mut self, bb: BasicBlock, terminator: &mut Terminator<'tcx>) { + fn visit_terminator(&mut self, + bb: BasicBlock, + terminator: &mut Terminator<'tcx>, + location: Location) { match terminator.kind { TerminatorKind::Goto { .. } | TerminatorKind::Resume | @@ -37,7 +40,7 @@ impl<'tcx> MutVisitor<'tcx> for NoLandingPads { unwind.take(); }, } - self.super_terminator(bb, terminator); + self.super_terminator(bb, terminator, location); } } diff --git a/src/librustc_mir/transform/promote_consts.rs b/src/librustc_mir/transform/promote_consts.rs index 21b406c3bf..57de68fce1 100644 --- a/src/librustc_mir/transform/promote_consts.rs +++ b/src/librustc_mir/transform/promote_consts.rs @@ -28,11 +28,10 @@ use rustc::mir::traversal::ReversePostorder; use rustc::ty::TyCtxt; use syntax_pos::Span; -use build::Location; - use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use std::mem; +use std::usize; /// State of a temporary during collection and promotion. #[derive(Copy, Clone, PartialEq, Eq, Debug)] @@ -77,13 +76,12 @@ pub enum Candidate { struct TempCollector { temps: IndexVec, - location: Location, span: Span } impl<'tcx> Visitor<'tcx> for TempCollector { - fn visit_lvalue(&mut self, lvalue: &Lvalue<'tcx>, context: LvalueContext) { - self.super_lvalue(lvalue, context); + fn visit_lvalue(&mut self, lvalue: &Lvalue<'tcx>, context: LvalueContext, location: Location) { + self.super_lvalue(lvalue, context, location); if let Lvalue::Temp(index) = *lvalue { // Ignore drops, if the temp gets promoted, // then it's constant and thus drop is noop. 
@@ -101,7 +99,7 @@ impl<'tcx> Visitor<'tcx> for TempCollector { LvalueContext::Store | LvalueContext::Call => { *temp = TempState::Defined { - location: self.location, + location: location, uses: 0 }; return; @@ -126,27 +124,11 @@ impl<'tcx> Visitor<'tcx> for TempCollector { fn visit_source_info(&mut self, source_info: &SourceInfo) { self.span = source_info.span; } - - fn visit_statement(&mut self, bb: BasicBlock, statement: &Statement<'tcx>) { - assert_eq!(self.location.block, bb); - self.super_statement(bb, statement); - self.location.statement_index += 1; - } - - fn visit_basic_block_data(&mut self, bb: BasicBlock, data: &BasicBlockData<'tcx>) { - self.location.statement_index = 0; - self.location.block = bb; - self.super_basic_block_data(bb, data); - } } pub fn collect_temps(mir: &Mir, rpo: &mut ReversePostorder) -> IndexVec { let mut collector = TempCollector { temps: IndexVec::from_elem(TempState::Undefined, &mir.temp_decls), - location: Location { - block: START_BLOCK, - statement_index: 0 - }, span: mir.span }; for (bb, data) in rpo { @@ -266,9 +248,15 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { // Then, recurse for components in the Rvalue or Call. if stmt_idx < no_stmts { - self.visit_rvalue(rvalue.as_mut().unwrap()); + self.visit_rvalue(rvalue.as_mut().unwrap(), Location { + block: bb, + statement_index: stmt_idx + }); } else { - self.visit_terminator_kind(bb, call.as_mut().unwrap()); + self.visit_terminator_kind(bb, call.as_mut().unwrap(), Location { + block: bb, + statement_index: no_stmts + }); } let new_temp = self.promoted.temp_decls.push(TempDecl { @@ -327,7 +315,10 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { } } }; - self.visit_rvalue(&mut rvalue); + self.visit_rvalue(&mut rvalue, Location{ + block: BasicBlock::new(0), + statement_index: usize::MAX + }); self.assign(Lvalue::ReturnPointer, rvalue, span); self.source.promoted.push(self.promoted); } @@ -335,11 +326,14 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { /// Replaces all temporaries with their promoted counterparts. 
impl<'a, 'tcx> MutVisitor<'tcx> for Promoter<'a, 'tcx> { - fn visit_lvalue(&mut self, lvalue: &mut Lvalue<'tcx>, context: LvalueContext) { + fn visit_lvalue(&mut self, + lvalue: &mut Lvalue<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { if let Lvalue::Temp(ref mut temp) = *lvalue { *temp = self.promote_temp(*temp); } - self.super_lvalue(lvalue, context); + self.super_lvalue(lvalue, context, location); } } diff --git a/src/librustc_mir/transform/qualify_consts.rs b/src/librustc_mir/transform/qualify_consts.rs index 103a15dadb..2c03af2c8e 100644 --- a/src/librustc_mir/transform/qualify_consts.rs +++ b/src/librustc_mir/transform/qualify_consts.rs @@ -18,6 +18,7 @@ use rustc_data_structures::bitvec::BitVector; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use rustc::dep_graph::DepNode; use rustc::hir; +use rustc::hir::map as hir_map; use rustc::hir::def_id::DefId; use rustc::hir::intravisit::FnKind; use rustc::hir::map::blocks::FnLikeNode; @@ -36,8 +37,7 @@ use syntax_pos::Span; use std::collections::hash_map::Entry; use std::fmt; - -use build::Location; +use std::usize; use super::promote_consts::{self, Candidate, TempState}; @@ -116,14 +116,14 @@ impl fmt::Display for Mode { } } -fn is_const_fn(tcx: TyCtxt, def_id: DefId) -> bool { +pub fn is_const_fn(tcx: TyCtxt, def_id: DefId) -> bool { if let Some(node_id) = tcx.map.as_local_node_id(def_id) { let fn_like = FnLikeNode::from_node(tcx.map.get(node_id)); match fn_like.map(|f| f.kind()) { - Some(FnKind::ItemFn(_, _, _, c, _, _, _)) => { + Some(FnKind::ItemFn(_, _, _, c, ..)) => { c == hir::Constness::Const } - Some(FnKind::Method(_, m, _, _)) => { + Some(FnKind::Method(_, m, ..)) => { m.constness == hir::Constness::Const } _ => false @@ -147,7 +147,6 @@ struct Qualifier<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { return_qualif: Option, qualif: Qualif, const_fn_arg_vars: BitVector, - location: Location, temp_promotion_state: IndexVec, promotion_candidates: Vec } @@ -178,10 +177,6 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { return_qualif: None, qualif: Qualif::empty(), const_fn_arg_vars: BitVector::new(mir.var_decls.len()), - location: Location { - block: START_BLOCK, - statement_index: 0 - }, temp_promotion_state: temps, promotion_candidates: vec![] } @@ -258,14 +253,46 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { let mut err = struct_span_err!(self.tcx.sess, self.span, E0493, "{}", msg); + if self.mode != Mode::Const { help!(&mut err, "in Nightly builds, add `#![feature(drop_types_in_const)]` \ to the crate attributes to enable"); + } else { + self.find_drop_implementation_method_span() + .map(|span| err.span_label(span, &format!("destructor defined here"))); + + err.span_label(self.span, &format!("constants cannot have destructors")); } + err.emit(); } + fn find_drop_implementation_method_span(&self) -> Option { + self.tcx.lang_items + .drop_trait() + .and_then(|drop_trait_id| { + let mut span = None; + + self.tcx + .lookup_trait_def(drop_trait_id) + .for_each_relevant_impl(self.tcx, self.mir.return_ty, |impl_did| { + self.tcx.map + .as_local_node_id(impl_did) + .and_then(|impl_node_id| self.tcx.map.find(impl_node_id)) + .map(|node| { + if let hir_map::NodeItem(item) = node { + if let hir::ItemImpl(_, _, _, _, _, ref methods) = item.node { + span = methods.first().map(|method| method.span); + } + } + }); + }); + + span + }) + } + /// Check if an Lvalue with the current qualifications could /// be consumed, by either an operand or a Deref projection. 
fn try_consume(&mut self) -> bool { @@ -277,7 +304,10 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { } else { "cannot refer to statics by value, use a constant instead" }; - span_err!(self.tcx.sess, self.span, E0394, "{}", msg); + struct_span_err!(self.tcx.sess, self.span, E0394, "{}", msg) + .span_label(self.span, &format!("referring to another static by value")) + .note(&format!("use the address-of operator or a constant instead")) + .emit(); // Replace STATIC with NOT_CONST to avoid further errors. self.qualif = self.qualif - Qualif::STATIC; @@ -290,7 +320,7 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { } /// Assign the current qualification to the given destination. - fn assign(&mut self, dest: &Lvalue<'tcx>) { + fn assign(&mut self, dest: &Lvalue<'tcx>, location: Location) { let qualif = self.qualif; let span = self.span; let store = |slot: &mut Option| { @@ -328,7 +358,7 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { // This must be an explicit assignment. _ => { // Catch more errors in the destination. - self.visit_lvalue(dest, LvalueContext::Store); + self.visit_lvalue(dest, LvalueContext::Store, location); self.statement_like(); } } @@ -396,7 +426,10 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { self.qualif = Qualif::NOT_CONST; for index in 0..mir.var_decls.len() { if !self.const_fn_arg_vars.contains(index) { - self.assign(&Lvalue::Var(Var::new(index))); + self.assign(&Lvalue::Var(Var::new(index)), Location { + block: bb, + statement_index: usize::MAX, + }); } } @@ -442,7 +475,10 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { /// For functions (constant or not), it also records /// candidates for promotion in promotion_candidates. impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { - fn visit_lvalue(&mut self, lvalue: &Lvalue<'tcx>, context: LvalueContext) { + fn visit_lvalue(&mut self, + lvalue: &Lvalue<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { match *lvalue { Lvalue::Arg(_) => { self.add(Qualif::FN_ARGUMENT); @@ -474,7 +510,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { } Lvalue::Projection(ref proj) => { self.nest(|this| { - this.super_lvalue(lvalue, context); + this.super_lvalue(lvalue, context, location); match proj.elem { ProjectionElem::Deref => { if !this.try_consume() { @@ -490,9 +526,13 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { if let ty::TyRawPtr(_) = base_ty.sty { this.add(Qualif::NOT_CONST); if this.mode != Mode::Fn { - span_err!(this.tcx.sess, this.span, E0396, - "raw pointers cannot be dereferenced in {}s", - this.mode); + struct_span_err!(this.tcx.sess, + this.span, E0396, + "raw pointers cannot be dereferenced in {}s", + this.mode) + .span_label(this.span, + &format!("dereference of raw pointer in constant")) + .emit(); } } } @@ -520,11 +560,11 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { } } - fn visit_operand(&mut self, operand: &Operand<'tcx>) { + fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) { match *operand { Operand::Consume(_) => { self.nest(|this| { - this.super_operand(operand); + this.super_operand(operand, location); this.try_consume(); }); } @@ -536,7 +576,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { if let Literal::Item { def_id, substs } = constant.literal { // Don't peek inside generic (associated) constants. 
- if !substs.types.is_empty() { + if substs.types().next().is_some() { self.add_type(constant.ty); } else { let qualif = qualify_const_item_cached(self.tcx, @@ -563,18 +603,18 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { } } - fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>) { + fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) { // Recurse through operands and lvalues. - self.super_rvalue(rvalue); + self.super_rvalue(rvalue, location); match *rvalue { Rvalue::Use(_) | Rvalue::Repeat(..) | Rvalue::UnaryOp(..) | Rvalue::CheckedBinaryOp(..) | - Rvalue::Cast(CastKind::ReifyFnPointer, _, _) | - Rvalue::Cast(CastKind::UnsafeFnPointer, _, _) | - Rvalue::Cast(CastKind::Unsize, _, _) => {} + Rvalue::Cast(CastKind::ReifyFnPointer, ..) | + Rvalue::Cast(CastKind::UnsafeFnPointer, ..) | + Rvalue::Cast(CastKind::Unsize, ..) => {} Rvalue::Len(_) => { // Static lvalues in consts would have errored already, @@ -641,7 +681,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { } // We might have a candidate for promotion. - let candidate = Candidate::Ref(self.location); + let candidate = Candidate::Ref(location); if self.mode == Mode::Fn || self.mode == Mode::ConstFn { if !self.qualif.intersects(Qualif::NEVER_PROMOTE) { // We can only promote direct borrows of temps. @@ -678,9 +718,14 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { self.add(Qualif::NOT_CONST); if self.mode != Mode::Fn { - span_err!(self.tcx.sess, self.span, E0395, - "raw pointers cannot be compared in {}s", - self.mode); + struct_span_err!( + self.tcx.sess, self.span, E0395, + "raw pointers cannot be compared in {}s", + self.mode) + .span_label( + self.span, + &format!("comparing raw pointers in static")) + .emit(); } } } @@ -696,7 +741,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { } Rvalue::Aggregate(ref kind, _) => { - if let AggregateKind::Adt(def, _, _) = *kind { + if let AggregateKind::Adt(def, ..) = *kind { if def.has_dtor() { self.add(Qualif::NEEDS_DROP); self.deny_drop(); @@ -721,9 +766,12 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { } } - fn visit_terminator_kind(&mut self, bb: BasicBlock, kind: &TerminatorKind<'tcx>) { + fn visit_terminator_kind(&mut self, + bb: BasicBlock, + kind: &TerminatorKind<'tcx>, + location: Location) { if let TerminatorKind::Call { ref func, ref args, ref destination, .. } = *kind { - self.visit_operand(func); + self.visit_operand(func, location); let fn_ty = func.ty(self.mir, self.tcx); let (is_shuffle, is_const_fn) = match fn_ty.sty { @@ -737,7 +785,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { for (i, arg) in args.iter().enumerate() { self.nest(|this| { - this.visit_operand(arg); + this.visit_operand(arg, location); if is_shuffle && i == 2 && this.mode == Mode::Fn { let candidate = Candidate::ShuffleIndices(bb); if !this.qualif.intersects(Qualif::NEVER_PROMOTE) { @@ -815,16 +863,20 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { self.deny_drop(); } } - self.assign(dest); + self.assign(dest, location); } } else { // Qualify any operands inside other terminators. 
- self.super_terminator_kind(bb, kind); + self.super_terminator_kind(bb, kind, location); } } - fn visit_assign(&mut self, _: BasicBlock, dest: &Lvalue<'tcx>, rvalue: &Rvalue<'tcx>) { - self.visit_rvalue(rvalue); + fn visit_assign(&mut self, + _: BasicBlock, + dest: &Lvalue<'tcx>, + rvalue: &Rvalue<'tcx>, + location: Location) { + self.visit_rvalue(rvalue, location); // Check the allowed const fn argument forms. if let (Mode::ConstFn, &Lvalue::Var(index)) = (self.mode, dest) { @@ -845,38 +897,33 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { } } - self.assign(dest); + self.assign(dest, location); } fn visit_source_info(&mut self, source_info: &SourceInfo) { self.span = source_info.span; } - fn visit_statement(&mut self, bb: BasicBlock, statement: &Statement<'tcx>) { - assert_eq!(self.location.block, bb); + fn visit_statement(&mut self, bb: BasicBlock, statement: &Statement<'tcx>, location: Location) { self.nest(|this| { this.visit_source_info(&statement.source_info); match statement.kind { StatementKind::Assign(ref lvalue, ref rvalue) => { - this.visit_assign(bb, lvalue, rvalue); + this.visit_assign(bb, lvalue, rvalue, location); } StatementKind::SetDiscriminant { .. } | StatementKind::StorageLive(_) | - StatementKind::StorageDead(_) => {} + StatementKind::StorageDead(_) | + StatementKind::Nop => {} } }); - self.location.statement_index += 1; } - fn visit_terminator(&mut self, bb: BasicBlock, terminator: &Terminator<'tcx>) { - assert_eq!(self.location.block, bb); - self.nest(|this| this.super_terminator(bb, terminator)); - } - - fn visit_basic_block_data(&mut self, bb: BasicBlock, data: &BasicBlockData<'tcx>) { - self.location.statement_index = 0; - self.location.block = bb; - self.super_basic_block_data(bb, data); + fn visit_terminator(&mut self, + bb: BasicBlock, + terminator: &Terminator<'tcx>, + location: Location) { + self.nest(|this| this.super_terminator(bb, terminator, location)); } } @@ -1009,10 +1056,6 @@ impl<'tcx> MirMapPass<'tcx> for QualifyAndPromoteConstants { if let Err(err) = fulfillment_cx.select_all_or_error(&infcx) { infcx.report_fulfillment_errors(&err); } - - if let Err(errors) = fulfillment_cx.select_rfc1592_obligations(&infcx) { - infcx.report_fulfillment_errors_as_warnings(&errors, id); - } }); } } diff --git a/src/librustc_mir/transform/simplify_branches.rs b/src/librustc_mir/transform/simplify_branches.rs index b4960c677a..407e216161 100644 --- a/src/librustc_mir/transform/simplify_branches.rs +++ b/src/librustc_mir/transform/simplify_branches.rs @@ -62,5 +62,5 @@ impl<'l> Pass for SimplifyBranches<'l> { } // avoid calling `type_name` - it contains `<'static>` - fn name(&self) -> &str { "SimplifyBranches" } + fn name(&self) -> ::std::borrow::Cow<'static, str> { "SimplifyBranches".into() } } diff --git a/src/librustc_mir/transform/simplify_cfg.rs b/src/librustc_mir/transform/simplify_cfg.rs index c0e7e54050..8e1b7b4497 100644 --- a/src/librustc_mir/transform/simplify_cfg.rs +++ b/src/librustc_mir/transform/simplify_cfg.rs @@ -64,7 +64,7 @@ impl<'l> Pass for SimplifyCfg<'l> { } // avoid calling `type_name` - it contains `<'static>` - fn name(&self) -> &str { "SimplifyCfg" } + fn name(&self) -> ::std::borrow::Cow<'static, str> { "SimplifyCfg".into() } } pub struct CfgSimplifier<'a, 'tcx: 'a> { diff --git a/src/librustc_mir/transform/type_check.rs b/src/librustc_mir/transform/type_check.rs index bbd2a93659..7b6a2f5580 100644 --- a/src/librustc_mir/transform/type_check.rs +++ b/src/librustc_mir/transform/type_check.rs @@ -20,6 +20,7 @@ use 
rustc::mir::tcx::LvalueTy; use rustc::mir::transform::{MirPass, MirSource, Pass}; use rustc::mir::visit::{self, Visitor}; use std::fmt; +use syntax::ast; use syntax_pos::{Span, DUMMY_SP}; use rustc_data_structures::indexed_vec::Idx; @@ -68,17 +69,20 @@ impl<'a, 'b, 'gcx, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'gcx, 'tcx> { } } - fn visit_lvalue(&mut self, lvalue: &Lvalue<'tcx>, _context: visit::LvalueContext) { - self.sanitize_lvalue(lvalue); + fn visit_lvalue(&mut self, + lvalue: &Lvalue<'tcx>, + _context: visit::LvalueContext, + location: Location) { + self.sanitize_lvalue(lvalue, location); } - fn visit_constant(&mut self, constant: &Constant<'tcx>) { - self.super_constant(constant); + fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) { + self.super_constant(constant, location); self.sanitize_type(constant, constant.ty); } - fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>) { - self.super_rvalue(rvalue); + fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) { + self.super_rvalue(rvalue, location); if let Some(ty) = rvalue.ty(self.mir, self.tcx()) { self.sanitize_type(rvalue, ty); } @@ -124,7 +128,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { } } - fn sanitize_lvalue(&mut self, lvalue: &Lvalue<'tcx>) -> LvalueTy<'tcx> { + fn sanitize_lvalue(&mut self, lvalue: &Lvalue<'tcx>, location: Location) -> LvalueTy<'tcx> { debug!("sanitize_lvalue: {:?}", lvalue); match *lvalue { Lvalue::Var(index) => LvalueTy::Ty { ty: self.mir.var_decls[index].ty }, @@ -136,14 +140,14 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { LvalueTy::Ty { ty: self.mir.return_ty } } Lvalue::Projection(ref proj) => { - let base_ty = self.sanitize_lvalue(&proj.base); + let base_ty = self.sanitize_lvalue(&proj.base, location); if let LvalueTy::Ty { ty } = base_ty { if ty.references_error() { assert!(self.errors_reported); return LvalueTy::Ty { ty: self.tcx().types.err }; } } - self.sanitize_projection(base_ty, &proj.elem, lvalue) + self.sanitize_projection(base_ty, &proj.elem, lvalue, location) } } } @@ -151,7 +155,8 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { fn sanitize_projection(&mut self, base: LvalueTy<'tcx>, pi: &LvalueElem<'tcx>, - lvalue: &Lvalue<'tcx>) + lvalue: &Lvalue<'tcx>, + location: Location) -> LvalueTy<'tcx> { debug!("sanitize_projection: {:?} {:?} {:?}", base, pi, lvalue); let tcx = self.tcx(); @@ -168,7 +173,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { } } ProjectionElem::Index(ref i) => { - self.visit_operand(i); + self.visit_operand(i, location); let index_ty = i.ty(self.mir, tcx); if index_ty != tcx.types.usize { LvalueTy::Ty { @@ -214,7 +219,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { } ProjectionElem::Downcast(adt_def1, index) => match base_ty.sty { - ty::TyEnum(adt_def, substs) if adt_def == adt_def1 => { + ty::TyAdt(adt_def, substs) if adt_def.is_enum() && adt_def == adt_def1 => { if index >= adt_def.variants.len() { LvalueTy::Ty { ty: span_mirbug_and_err!( @@ -277,8 +282,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { (&adt_def.variants[variant_index], substs) } LvalueTy::Ty { ty } => match ty.sty { - ty::TyStruct(adt_def, substs) | ty::TyEnum(adt_def, substs) - if adt_def.is_univariant() => { + ty::TyAdt(adt_def, substs) if adt_def.is_univariant() => { (&adt_def.variants[0], substs) } ty::TyTuple(tys) | ty::TyClosure(_, ty::ClosureSubsts { @@ -358,7 +362,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { 
StatementKind::SetDiscriminant{ ref lvalue, variant_index } => { let lvalue_type = lvalue.ty(mir, tcx).to_ty(tcx); let adt = match lvalue_type.sty { - TypeVariants::TyEnum(adt, _) => adt, + TypeVariants::TyAdt(adt, _) if adt.is_enum() => adt, _ => { span_bug!(stmt.source_info.span, "bad set discriminant ({:?} = {:?}): lhs is not an enum", @@ -382,6 +386,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { } } } + StatementKind::Nop => {} } } @@ -438,9 +443,10 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { TerminatorKind::Switch { ref discr, adt_def, ref targets } => { let discr_ty = discr.ty(mir, tcx).to_ty(tcx); match discr_ty.sty { - ty::TyEnum(def, _) - if def == adt_def && adt_def.variants.len() == targets.len() - => {}, + ty::TyAdt(def, _) if def.is_enum() && + def == adt_def && + adt_def.variants.len() == targets.len() + => {}, _ => { span_mirbug!(self, term, "bad Switch ({:?} on {:?})", adt_def, discr_ty); @@ -451,7 +457,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { let func_ty = func.ty(mir, tcx); debug!("check_terminator: call, func_ty={:?}", func_ty); let func_ty = match func_ty.sty { - ty::TyFnDef(_, _, func_ty) | ty::TyFnPtr(func_ty) => func_ty, + ty::TyFnDef(.., func_ty) | ty::TyFnPtr(func_ty) => func_ty, _ => { span_mirbug!(self, term, "call to non-function {:?}", func_ty); return; @@ -669,7 +675,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { where T: fmt::Debug + TypeFoldable<'tcx> { let mut selcx = traits::SelectionContext::new(self.infcx); - let cause = traits::ObligationCause::misc(self.last_span, 0); + let cause = traits::ObligationCause::misc(self.last_span, ast::CRATE_NODE_ID); let traits::Normalized { value, obligations } = traits::normalize(&mut selcx, cause, value); diff --git a/src/librustc_passes/ast_validation.rs b/src/librustc_passes/ast_validation.rs index 46124d0f97..5096a574e2 100644 --- a/src/librustc_passes/ast_validation.rs +++ b/src/librustc_passes/ast_validation.rs @@ -20,6 +20,7 @@ use rustc::lint; use rustc::session::Session; use syntax::ast::*; use syntax::attr; +use syntax::codemap::Spanned; use syntax::parse::token::{self, keywords}; use syntax::visit::{self, Visitor}; use syntax_pos::Span; @@ -52,8 +53,11 @@ impl<'a> AstValidator<'a> { span, E0449, "unnecessary visibility qualifier"); + if vis == &Visibility::Public { + err.span_label(span, &format!("`pub` not needed here")); + } if let Some(note) = note { - err.span_note(span, note); + err.note(note); } err.emit(); } @@ -69,6 +73,18 @@ impl<'a> AstValidator<'a> { } } } + + fn check_trait_fn_not_const(&self, constness: Spanned) { + match constness.node { + Constness::Const => { + struct_span_err!(self.session, constness.span, E0379, + "trait fns cannot be declared const") + .span_label(constness.span, &format!("trait fns cannot be const")) + .emit(); + } + _ => {} + } + } } impl<'a> Visitor for AstValidator<'a> { @@ -85,10 +101,10 @@ impl<'a> Visitor for AstValidator<'a> { fn visit_expr(&mut self, expr: &Expr) { match expr.node { - ExprKind::While(_, _, Some(ident)) | + ExprKind::While(.., Some(ident)) | ExprKind::Loop(_, Some(ident)) | - ExprKind::WhileLet(_, _, _, Some(ident)) | - ExprKind::ForLoop(_, _, _, Some(ident)) | + ExprKind::WhileLet(.., Some(ident)) | + ExprKind::ForLoop(.., Some(ident)) | ExprKind::Break(Some(ident)) | ExprKind::Continue(Some(ident)) => { self.check_label(ident.node, ident.span, expr.id); @@ -142,13 +158,16 @@ impl<'a> Visitor for AstValidator<'a> { .span_err(path.span, "type or lifetime parameters in import path"); } } - ItemKind::Impl(_, 
_, _, Some(..), _, ref impl_items) => { + ItemKind::Impl(.., Some(..), _, ref impl_items) => { self.invalid_visibility(&item.vis, item.span, None); for impl_item in impl_items { self.invalid_visibility(&impl_item.vis, impl_item.span, None); + if let ImplItemKind::Method(ref sig, _) = impl_item.node { + self.check_trait_fn_not_const(sig.constness); + } } } - ItemKind::Impl(_, _, _, None, _, _) => { + ItemKind::Impl(.., None, _, _) => { self.invalid_visibility(&item.vis, item.span, Some("place qualifiers on individual impl items instead")); @@ -169,10 +188,27 @@ impl<'a> Visitor for AstValidator<'a> { } } } + ItemKind::Trait(.., ref trait_items) => { + for trait_item in trait_items { + if let TraitItemKind::Method(ref sig, _) = trait_item.node { + self.check_trait_fn_not_const(sig.constness); + } + } + } ItemKind::Mod(_) => { // Ensure that `path` attributes on modules are recorded as used (c.f. #35584). attr::first_attr_value_str_by_name(&item.attrs, "path"); } + ItemKind::Union(ref vdata, _) => { + if !vdata.is_struct() { + self.err_handler().span_err(item.span, + "tuple and unit unions are not permitted"); + } + if vdata.fields().len() == 0 { + self.err_handler().span_err(item.span, + "unions cannot have zero fields"); + } + } _ => {} } diff --git a/src/librustc_passes/consts.rs b/src/librustc_passes/consts.rs index 0c54f20fe7..f919e42b6b 100644 --- a/src/librustc_passes/consts.rs +++ b/src/librustc_passes/consts.rs @@ -147,8 +147,9 @@ impl<'a, 'gcx> CheckCrateVisitor<'a, 'gcx> { } let mode = match fk { - FnKind::ItemFn(_, _, _, hir::Constness::Const, _, _, _) => Mode::ConstFn, - FnKind::Method(_, m, _, _) => { + FnKind::ItemFn(_, _, _, hir::Constness::Const, ..) + => Mode::ConstFn, + FnKind::Method(_, m, ..) => { if m.constness == hir::Constness::Const { Mode::ConstFn } else { @@ -283,10 +284,10 @@ impl<'a, 'tcx, 'v> Visitor<'v> for CheckCrateVisitor<'a, 'tcx> { Ok(Ordering::Less) | Ok(Ordering::Equal) => {} Ok(Ordering::Greater) => { - span_err!(self.tcx.sess, - start.span, - E0030, - "lower range bound must be less than or equal to upper"); + struct_span_err!(self.tcx.sess, start.span, E0030, + "lower range bound must be less than or equal to upper") + .span_label(start.span, &format!("lower bound larger than upper bound")) + .emit(); } Err(ErrorReported) => {} } @@ -306,8 +307,8 @@ impl<'a, 'tcx, 'v> Visitor<'v> for CheckCrateVisitor<'a, 'tcx> { hir::DeclItem(_) => continue, } } - hir::StmtExpr(_, _) => {} - hir::StmtSemi(_, _) => {} + hir::StmtExpr(..) => {} + hir::StmtSemi(..) => {} } self.add_qualif(ConstQualif::NOT_CONST); } @@ -438,8 +439,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for CheckCrateVisitor<'a, 'tcx> { /// instead of producing errors. fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>, e: &hir::Expr, node_ty: Ty<'tcx>) { match node_ty.sty { - ty::TyStruct(def, _) | - ty::TyEnum(def, _) if def.has_dtor() => { + ty::TyAdt(def, _) if def.has_dtor() => { v.add_qualif(ConstQualif::NEEDS_DROP); } _ => {} @@ -669,7 +669,7 @@ impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for CheckCrateVisitor<'a, 'gcx> { Categorization::StaticItem => { break; } - Categorization::Deref(ref cmt, _, _) | + Categorization::Deref(ref cmt, ..) 
| Categorization::Downcast(ref cmt, _) | Categorization::Interior(ref cmt, _) => cur = cmt, @@ -683,7 +683,7 @@ impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for CheckCrateVisitor<'a, 'gcx> { borrow_id: ast::NodeId, _borrow_span: Span, cmt: mc::cmt<'tcx>, - _loan_region: ty::Region, + _loan_region: &'tcx ty::Region, bk: ty::BorrowKind, loan_cause: euv::LoanCause) { // Kind of hacky, but we allow Unsafe coercions in constants. @@ -714,7 +714,7 @@ impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for CheckCrateVisitor<'a, 'gcx> { // type of the expression. `&mut [1]` has exactly the // same representation as &mut 1. match cmt.ty.sty { - ty::TyArray(_, _) | + ty::TyArray(..) | ty::TySlice(_) => break, _ => {} } @@ -725,7 +725,7 @@ impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for CheckCrateVisitor<'a, 'gcx> { Categorization::StaticItem => { break; } - Categorization::Deref(ref cmt, _, _) | + Categorization::Deref(ref cmt, ..) | Categorization::Downcast(ref cmt, _) | Categorization::Interior(ref cmt, _) => { cur = cmt; diff --git a/src/librustc_passes/diagnostics.rs b/src/librustc_passes/diagnostics.rs index 7049040678..89b8aa8141 100644 --- a/src/librustc_passes/diagnostics.rs +++ b/src/librustc_passes/diagnostics.rs @@ -176,6 +176,13 @@ fn some_func() { ``` "##, +E0379: r##" +Trait methods cannot be declared `const` by design. For more information, see +[RFC 911]. + +[RFC 911]: https://github.com/rust-lang/rfcs/pull/911 +"##, + E0449: r##" A visibility qualifier was used when it was unnecessary. Erroneous code examples: diff --git a/src/librustc_passes/lib.rs b/src/librustc_passes/lib.rs index e59c4a6fc4..a4657251c9 100644 --- a/src/librustc_passes/lib.rs +++ b/src/librustc_passes/lib.rs @@ -23,6 +23,7 @@ html_root_url = "https://doc.rust-lang.org/nightly/")] #![cfg_attr(not(stage0), deny(warnings))] +#![feature(dotdot_in_tuple_patterns)] #![feature(rustc_diagnostic_macros)] #![feature(staged_api)] #![feature(rustc_private)] diff --git a/src/librustc_passes/loops.rs b/src/librustc_passes/loops.rs index eab16bd5bd..e942707acd 100644 --- a/src/librustc_passes/loops.rs +++ b/src/librustc_passes/loops.rs @@ -53,7 +53,7 @@ impl<'a, 'v> Visitor<'v> for CheckLoopVisitor<'a> { hir::ExprLoop(ref b, _) => { self.with_context(Loop, |v| v.visit_block(&b)); } - hir::ExprClosure(_, _, ref b, _) => { + hir::ExprClosure(.., ref b, _) => { self.with_context(Closure, |v| v.visit_block(&b)); } hir::ExprBreak(_) => self.require_loop("break", e.span), diff --git a/src/librustc_passes/rvalues.rs b/src/librustc_passes/rvalues.rs index 782ee34edd..c3ef5a72a2 100644 --- a/src/librustc_passes/rvalues.rs +++ b/src/librustc_passes/rvalues.rs @@ -88,7 +88,7 @@ impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for RvalueContextDelegate<'a, 'gcx, 'tc _borrow_id: ast::NodeId, _borrow_span: Span, _cmt: mc::cmt, - _loan_region: ty::Region, + _loan_region: &'tcx ty::Region, _bk: ty::BorrowKind, _loan_cause: euv::LoanCause) { } diff --git a/src/librustc_passes/static_recursion.rs b/src/librustc_passes/static_recursion.rs index 8b2943a33c..0336c3063d 100644 --- a/src/librustc_passes/static_recursion.rs +++ b/src/librustc_passes/static_recursion.rs @@ -126,7 +126,7 @@ impl<'a, 'ast: 'a> CheckItemRecursionVisitor<'a, 'ast> { idstack: Vec::new(), } } - fn with_item_id_pushed(&mut self, id: ast::NodeId, f: F) + fn with_item_id_pushed(&mut self, id: ast::NodeId, f: F, span: Span) where F: Fn(&mut Self) { if self.idstack.iter().any(|&x| x == id) { @@ -143,14 +143,16 @@ impl<'a, 'ast: 'a> CheckItemRecursionVisitor<'a, 'ast> { }); if any_static { if 
!self.sess.features.borrow().static_recursion { - emit_feature_err(&self.sess.parse_sess.span_diagnostic, + emit_feature_err(&self.sess.parse_sess, "static_recursion", *self.root_span, GateIssue::Language, "recursive static"); } } else { - span_err!(self.sess, *self.root_span, E0265, "recursive constant"); + struct_span_err!(self.sess, span, E0265, "recursive constant") + .span_label(span, &format!("recursion not allowed in constant")) + .emit(); } return; } @@ -203,7 +205,7 @@ impl<'a, 'ast: 'a> CheckItemRecursionVisitor<'a, 'ast> { impl<'a, 'ast: 'a> Visitor<'ast> for CheckItemRecursionVisitor<'a, 'ast> { fn visit_item(&mut self, it: &'ast hir::Item) { - self.with_item_id_pushed(it.id, |v| intravisit::walk_item(v, it)); + self.with_item_id_pushed(it.id, |v| intravisit::walk_item(v, it), it.span); } fn visit_enum_def(&mut self, @@ -233,16 +235,16 @@ impl<'a, 'ast: 'a> Visitor<'ast> for CheckItemRecursionVisitor<'a, 'ast> { // If `maybe_expr` is `None`, that's because no discriminant is // specified that affects this variant. Thus, no risk of recursion. if let Some(expr) = maybe_expr { - self.with_item_id_pushed(expr.id, |v| intravisit::walk_expr(v, expr)); + self.with_item_id_pushed(expr.id, |v| intravisit::walk_expr(v, expr), expr.span); } } fn visit_trait_item(&mut self, ti: &'ast hir::TraitItem) { - self.with_item_id_pushed(ti.id, |v| intravisit::walk_trait_item(v, ti)); + self.with_item_id_pushed(ti.id, |v| intravisit::walk_trait_item(v, ti), ti.span); } fn visit_impl_item(&mut self, ii: &'ast hir::ImplItem) { - self.with_item_id_pushed(ii.id, |v| intravisit::walk_impl_item(v, ii)); + self.with_item_id_pushed(ii.id, |v| intravisit::walk_impl_item(v, ii), ii.span); } fn visit_expr(&mut self, e: &'ast hir::Expr) { @@ -270,15 +272,13 @@ impl<'a, 'ast: 'a> Visitor<'ast> for CheckItemRecursionVisitor<'a, 'ast> { // affect the specific variant used, but we need to check // the whole enum definition to see what expression that // might be (if any). - Some(Def::Variant(enum_id, variant_id)) => { - if let Some(enum_node_id) = self.ast_map.as_local_node_id(enum_id) { - if let hir::ItemEnum(ref enum_def, ref generics) = self.ast_map - .expect_item(enum_node_id) - .node { + Some(Def::Variant(variant_id)) => { + if let Some(variant_id) = self.ast_map.as_local_node_id(variant_id) { + let variant = self.ast_map.expect_variant(variant_id); + let enum_id = self.ast_map.get_parent(variant_id); + let enum_item = self.ast_map.expect_item(enum_id); + if let hir::ItemEnum(ref enum_def, ref generics) = enum_item.node { self.populate_enum_discriminants(enum_def); - let enum_id = self.ast_map.as_local_node_id(enum_id).unwrap(); - let variant_id = self.ast_map.as_local_node_id(variant_id).unwrap(); - let variant = self.ast_map.expect_variant(variant_id); self.visit_variant(variant, generics, enum_id); } else { span_bug!(e.span, diff --git a/src/librustc_plugin/lib.rs b/src/librustc_plugin/lib.rs index e60a657ba1..91e0fd636c 100644 --- a/src/librustc_plugin/lib.rs +++ b/src/librustc_plugin/lib.rs @@ -27,7 +27,7 @@ //! //! extern crate rustc; //! -//! use rustc::plugin::Registry; +//! use rustc_plugin::Registry; //! //! #[plugin_registrar] //! 
pub fn plugin_registrar(reg: &mut Registry) { diff --git a/src/librustc_plugin/load.rs b/src/librustc_plugin/load.rs index fb68eae964..9e56397bc9 100644 --- a/src/librustc_plugin/load.rs +++ b/src/librustc_plugin/load.rs @@ -20,8 +20,6 @@ use std::env; use std::mem; use std::path::PathBuf; use syntax::ast; -use syntax::ptr::P; -use syntax::attr::AttrMetaMethods; use syntax_pos::{Span, COMMAND_LINE_SP}; /// Pointer to a registrar function. @@ -30,7 +28,7 @@ pub type PluginRegistrarFun = pub struct PluginRegistrar { pub fun: PluginRegistrarFun, - pub args: Vec>, + pub args: Vec, } struct PluginLoader<'a> { @@ -69,13 +67,14 @@ pub fn load_plugins(sess: &Session, }; for plugin in plugins { - if plugin.value_str().is_some() { - call_malformed_plugin_attribute(sess, attr.span); - continue; + // plugins must have a name and can't be key = value + match plugin.name() { + Some(ref name) if !plugin.is_value_str() => { + let args = plugin.meta_item_list().map(ToOwned::to_owned); + loader.load_plugin(plugin.span, name, args.unwrap_or_default()); + }, + _ => call_malformed_plugin_attribute(sess, attr.span), } - - let args = plugin.meta_item_list().map(ToOwned::to_owned).unwrap_or_default(); - loader.load_plugin(plugin.span, &plugin.name(), args); } } } @@ -102,7 +101,7 @@ impl<'a> PluginLoader<'a> { } } - fn load_plugin(&mut self, span: Span, name: &str, args: Vec>) { + fn load_plugin(&mut self, span: Span, name: &str, args: Vec) { let registrar = self.reader.find_plugin_registrar(span, name); if let Some((lib, svh, index)) = registrar { diff --git a/src/librustc_plugin/registry.rs b/src/librustc_plugin/registry.rs index 54fa0197de..9c74a644c3 100644 --- a/src/librustc_plugin/registry.rs +++ b/src/librustc_plugin/registry.rs @@ -15,11 +15,9 @@ use rustc::session::Session; use rustc::mir::transform::MirMapPass; -use syntax::ext::base::{SyntaxExtension, NamedSyntaxExtension, NormalTT}; -use syntax::ext::base::{IdentTT, MultiModifier, MultiDecorator}; -use syntax::ext::base::{MacroExpanderFn, MacroRulesTT}; +use syntax::ext::base::{SyntaxExtension, NamedSyntaxExtension, NormalTT, IdentTT}; +use syntax::ext::base::MacroExpanderFn; use syntax::parse::token; -use syntax::ptr::P; use syntax::ast; use syntax::feature_gate::AttributeType; use syntax_pos::Span; @@ -41,7 +39,7 @@ pub struct Registry<'a> { pub sess: &'a Session, #[doc(hidden)] - pub args_hidden: Option>>, + pub args_hidden: Option>, #[doc(hidden)] pub krate_span: Span, @@ -70,11 +68,11 @@ pub struct Registry<'a> { impl<'a> Registry<'a> { #[doc(hidden)] - pub fn new(sess: &'a Session, krate: &ast::Crate) -> Registry<'a> { + pub fn new(sess: &'a Session, krate_span: Span) -> Registry<'a> { Registry { sess: sess, args_hidden: None, - krate_span: krate.span, + krate_span: krate_span, syntax_exts: vec!(), early_lint_passes: vec!(), late_lint_passes: vec!(), @@ -95,7 +93,7 @@ impl<'a> Registry<'a> { /// /// Returns empty slice in case the plugin was loaded /// with `--extra-plugins` - pub fn args<'b>(&'b self) -> &'b [P] { + pub fn args<'b>(&'b self) -> &'b [ast::NestedMetaItem] { self.args_hidden.as_ref().map(|v| &v[..]).unwrap_or(&[]) } @@ -103,6 +101,9 @@ impl<'a> Registry<'a> { /// /// This is the most general hook into `libsyntax`'s expansion behavior. 
pub fn register_syntax_extension(&mut self, name: ast::Name, extension: SyntaxExtension) { + if name.as_str() == "macro_rules" { + panic!("user-defined macros may not be named `macro_rules`"); + } self.syntax_exts.push((name, match extension { NormalTT(ext, _, allow_internal_unstable) => { NormalTT(ext, Some(self.krate_span), allow_internal_unstable) @@ -110,12 +111,7 @@ impl<'a> Registry<'a> { IdentTT(ext, _, allow_internal_unstable) => { IdentTT(ext, Some(self.krate_span), allow_internal_unstable) } - MultiDecorator(ext) => MultiDecorator(ext), - MultiModifier(ext) => MultiModifier(ext), - MacroRulesTT => { - self.sess.err("plugin tried to register a new MacroRulesTT"); - return; - } + _ => extension, })); } @@ -157,7 +153,6 @@ impl<'a> Registry<'a> { self.llvm_passes.push(name.to_owned()); } - /// Register an attribute with an attribute type. /// /// Registered attributes will bypass the `custom_attribute` feature gate. diff --git a/src/librustc_privacy/lib.rs b/src/librustc_privacy/lib.rs index de9ddcd934..43cdf2942d 100644 --- a/src/librustc_privacy/lib.rs +++ b/src/librustc_privacy/lib.rs @@ -17,6 +17,7 @@ html_root_url = "https://doc.rust-lang.org/nightly/")] #![cfg_attr(not(stage0), deny(warnings))] +#![feature(dotdot_in_tuple_patterns)] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] #![feature(staged_api)] @@ -125,10 +126,10 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> { fn visit_item(&mut self, item: &hir::Item) { let inherited_item_level = match item.node { // Impls inherit level from their types and traits - hir::ItemImpl(_, _, _, None, ref ty, _) => { + hir::ItemImpl(.., None, ref ty, _) => { self.ty_level(&ty) } - hir::ItemImpl(_, _, _, Some(ref trait_ref), ref ty, _) => { + hir::ItemImpl(.., Some(ref trait_ref), ref ty, _) => { cmp::min(self.ty_level(&ty), self.trait_level(trait_ref)) } hir::ItemDefaultImpl(_, ref trait_ref) => { @@ -157,24 +158,24 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> { } } } - hir::ItemImpl(_, _, _, None, _, ref impl_items) => { + hir::ItemImpl(.., None, _, ref impl_items) => { for impl_item in impl_items { if impl_item.vis == hir::Public { self.update(impl_item.id, item_level); } } } - hir::ItemImpl(_, _, _, Some(_), _, ref impl_items) => { + hir::ItemImpl(.., Some(_), _, ref impl_items) => { for impl_item in impl_items { self.update(impl_item.id, item_level); } } - hir::ItemTrait(_, _, _, ref trait_items) => { + hir::ItemTrait(.., ref trait_items) => { for trait_item in trait_items { self.update(trait_item.id, item_level); } } - hir::ItemStruct(ref def, _) => { + hir::ItemStruct(ref def, _) | hir::ItemUnion(ref def, _) => { if !def.is_struct() { self.update(def.id(), item_level); } @@ -204,7 +205,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> { hir::ItemUse(..) => {} // Visit everything hir::ItemConst(..) | hir::ItemStatic(..) | hir::ItemFn(..) | - hir::ItemTrait(..) | hir::ItemTy(..) | hir::ItemImpl(_, _, _, Some(..), _, _) => { + hir::ItemTrait(..) | hir::ItemTy(..) 
| hir::ItemImpl(.., Some(..), _, _) => { if item_level.is_some() { self.reach().visit_item(item); } @@ -234,7 +235,8 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> { } } // Visit everything except for private fields - hir::ItemStruct(ref struct_def, ref generics) => { + hir::ItemStruct(ref struct_def, ref generics) | + hir::ItemUnion(ref struct_def, ref generics) => { if item_level.is_some() { self.reach().visit_generics(generics); for field in struct_def.fields() { @@ -247,7 +249,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> { // The interface is empty hir::ItemDefaultImpl(..) => {} // Visit everything except for private impl items - hir::ItemImpl(_, _, ref generics, None, _, ref impl_items) => { + hir::ItemImpl(.., ref generics, None, _, ref impl_items) => { if item_level.is_some() { self.reach().visit_generics(generics); for impl_item in impl_items { @@ -320,9 +322,14 @@ impl<'b, 'a, 'tcx: 'a, 'v> Visitor<'v> for ReachEverythingInTheInterfaceVisitor< if let hir::TyPath(_, ref path) = ty.node { let def = self.ev.tcx.expect_def(ty.id); match def { - Def::Struct(def_id) | Def::Enum(def_id) | Def::TyAlias(def_id) | - Def::Trait(def_id) | Def::AssociatedTy(def_id, _) => { - if let Some(node_id) = self.ev.tcx.map.as_local_node_id(def_id) { + Def::Struct(def_id) | Def::Union(def_id) | Def::Enum(def_id) | + Def::TyAlias(def_id) | Def::Trait(def_id) | Def::AssociatedTy(def_id) => { + if let Some(mut node_id) = self.ev.tcx.map.as_local_node_id(def_id) { + // Check the trait for associated types. + if let hir::map::NodeTraitItem(_) = self.ev.tcx.map.get(node_id) { + node_id = self.ev.tcx.map.get_parent(node_id); + } + let item = self.ev.tcx.map.expect_item(node_id); if let Def::TyAlias(..) = def { // Type aliases are substituted. Associated type aliases are not @@ -382,10 +389,11 @@ impl<'a, 'tcx> PrivacyVisitor<'a, 'tcx> { // Checks that a field is in scope. fn check_field(&mut self, span: Span, def: ty::AdtDef<'tcx>, field: ty::FieldDef<'tcx>) { - if def.adt_kind() == ty::AdtKind::Struct && - !field.vis.is_accessible_from(self.curitem, &self.tcx.map) { - span_err!(self.tcx.sess, span, E0451, "field `{}` of struct `{}` is private", - field.name, self.tcx.item_path_str(def.did)); + if !def.is_enum() && !field.vis.is_accessible_from(self.curitem, &self.tcx.map) { + struct_span_err!(self.tcx.sess, span, E0451, "field `{}` of {} `{}` is private", + field.name, def.variant_descr(), self.tcx.item_path_str(def.did)) + .span_label(span, &format!("field `{}` is private", field.name)) + .emit(); } } @@ -425,32 +433,61 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivacyVisitor<'a, 'tcx> { let method = self.tcx.tables.borrow().method_map[&method_call]; self.check_method(expr.span, method.def_id); } - hir::ExprStruct(..) => { + hir::ExprStruct(_, ref expr_fields, _) => { let adt = self.tcx.expr_ty(expr).ty_adt_def().unwrap(); let variant = adt.variant_of_def(self.tcx.expect_def(expr.id)); // RFC 736: ensure all unmentioned fields are visible. // Rather than computing the set of unmentioned fields - // (i.e. `all_fields - fields`), just check them all. - for field in &variant.fields { - self.check_field(expr.span, adt, field); + // (i.e. `all_fields - fields`), just check them all, + // unless the ADT is a union, then unmentioned fields + // are not checked. 
+ if adt.is_union() { + for expr_field in expr_fields { + self.check_field(expr.span, adt, variant.field_named(expr_field.name.node)); + } + } else { + for field in &variant.fields { + let expr_field = expr_fields.iter().find(|f| f.name.node == field.name); + let span = if let Some(f) = expr_field { f.span } else { expr.span }; + self.check_field(span, adt, field); + } } } hir::ExprPath(..) => { if let Def::Struct(..) = self.tcx.expect_def(expr.id) { let expr_ty = self.tcx.expr_ty(expr); let def = match expr_ty.sty { - ty::TyFnDef(_, _, &ty::BareFnTy { sig: ty::Binder(ty::FnSig { + ty::TyFnDef(.., &ty::BareFnTy { sig: ty::Binder(ty::FnSig { output: ty, .. }), ..}) => ty, _ => expr_ty }.ty_adt_def().unwrap(); - let any_priv = def.struct_variant().fields.iter().any(|f| { - !f.vis.is_accessible_from(self.curitem, &self.tcx.map) - }); - if any_priv { - span_err!(self.tcx.sess, expr.span, E0450, - "cannot invoke tuple struct constructor with private \ - fields"); + + let private_indexes : Vec<_> = def.struct_variant().fields.iter().enumerate() + .filter(|&(_,f)| { + !f.vis.is_accessible_from(self.curitem, &self.tcx.map) + }).map(|(n,&_)|n).collect(); + + if !private_indexes.is_empty() { + + let mut error = struct_span_err!(self.tcx.sess, expr.span, E0450, + "cannot invoke tuple struct constructor \ + with private fields"); + error.span_label(expr.span, + &format!("cannot construct with a private field")); + + if let Some(def_id) = self.tcx.map.as_local_node_id(def.did) { + if let Some(hir::map::NodeItem(node)) = self.tcx.map.find(def_id) { + if let hir::Item_::ItemStruct(ref tuple_data, _) = node.node { + + for i in private_indexes { + error.span_label(tuple_data.fields()[i].span, + &format!("private field declared here")); + } + } + } + } + error.emit(); } } } @@ -472,12 +509,13 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivacyVisitor<'a, 'tcx> { let adt = self.tcx.pat_ty(pattern).ty_adt_def().unwrap(); let variant = adt.variant_of_def(self.tcx.expect_def(pattern.id)); for field in fields { - self.check_field(pattern.span, adt, variant.field_named(field.node.name)); + self.check_field(field.span, adt, variant.field_named(field.node.name)); } } PatKind::TupleStruct(_, ref fields, ddpos) => { match self.tcx.pat_ty(pattern).sty { - ty::TyStruct(def, _) => { + // enum fields have no privacy at this time + ty::TyAdt(def, _) if !def.is_enum() => { let expected_len = def.struct_variant().fields.len(); for (i, field) in fields.iter().enumerate_and_adjust(expected_len, ddpos) { if let PatKind::Wild = field.node { @@ -486,9 +524,6 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivacyVisitor<'a, 'tcx> { self.check_field(field.span, def, &def.struct_variant().fields[i]); } } - ty::TyEnum(..) => { - // enum fields have no privacy at this time - } _ => {} } } @@ -610,7 +645,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> // namespace (the contents have their own privacies). hir::ItemForeignMod(_) => {} - hir::ItemTrait(_, _, ref bounds, _) => { + hir::ItemTrait(.., ref bounds, _) => { if !self.trait_is_public(item.id) { return } @@ -625,7 +660,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> // (i.e. we could just return here to not check them at // all, or some worse estimation of whether an impl is // publicly visible). - hir::ItemImpl(_, _, ref g, ref trait_ref, ref self_, ref impl_items) => { + hir::ItemImpl(.., ref g, ref trait_ref, ref self_, ref impl_items) => { // `impl [... for] Private` is never visible. 
let self_contains_private; // impl [... for] Public<...>, but not `impl [... for] @@ -916,10 +951,15 @@ impl<'a, 'tcx: 'a, 'v> Visitor<'v> for SearchInterfaceForPrivateItemsVisitor<'a, // free type aliases, but this isn't done yet. return } - Def::Struct(def_id) | Def::Enum(def_id) | Def::TyAlias(def_id) | - Def::Trait(def_id) | Def::AssociatedTy(def_id, _) => { + Def::Struct(def_id) | Def::Union(def_id) | Def::Enum(def_id) | + Def::TyAlias(def_id) | Def::Trait(def_id) | Def::AssociatedTy(def_id) => { // Non-local means public (private items can't leave their crate, modulo bugs) - if let Some(node_id) = self.tcx.map.as_local_node_id(def_id) { + if let Some(mut node_id) = self.tcx.map.as_local_node_id(def_id) { + // Check the trait for associated types. + if let hir::map::NodeTraitItem(_) = self.tcx.map.get(node_id) { + node_id = self.tcx.map.get_parent(node_id); + } + let item = self.tcx.map.expect_item(node_id); let vis = match self.substituted_alias_visibility(item, path) { Some(vis) => vis, @@ -932,8 +972,10 @@ impl<'a, 'tcx: 'a, 'v> Visitor<'v> for SearchInterfaceForPrivateItemsVisitor<'a, if !vis.is_at_least(self.required_visibility, &self.tcx.map) { if self.tcx.sess.features.borrow().pub_restricted || self.old_error_set.contains(&ty.id) { - span_err!(self.tcx.sess, ty.span, E0446, + let mut err = struct_span_err!(self.tcx.sess, ty.span, E0446, "private type in public interface"); + err.span_label(ty.span, &format!("can't leak private type")); + err.emit(); } else { self.tcx.sess.add_lint(lint::builtin::PRIVATE_IN_PUBLIC, node_id, @@ -964,8 +1006,11 @@ impl<'a, 'tcx: 'a, 'v> Visitor<'v> for SearchInterfaceForPrivateItemsVisitor<'a, if !vis.is_at_least(self.required_visibility, &self.tcx.map) { if self.tcx.sess.features.borrow().pub_restricted || self.old_error_set.contains(&trait_ref.ref_id) { - span_err!(self.tcx.sess, trait_ref.path.span, E0445, - "private trait in public interface"); + struct_span_err!(self.tcx.sess, trait_ref.path.span, E0445, + "private trait in public interface") + .span_label(trait_ref.path.span, &format!( + "private trait can't be public")) + .emit(); } else { self.tcx.sess.add_lint(lint::builtin::PRIVATE_IN_PUBLIC, node_id, @@ -1038,8 +1083,9 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivateItemsInPublicInterfacesVisitor<'a, 'tc check.visit_foreign_item(foreign_item); } } - // Subitems of structs have their own publicity - hir::ItemStruct(ref struct_def, ref generics) => { + // Subitems of structs and unions have their own publicity + hir::ItemStruct(ref struct_def, ref generics) | + hir::ItemUnion(ref struct_def, ref generics) => { check.required_visibility = item_visibility; check.visit_generics(generics); @@ -1053,7 +1099,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivateItemsInPublicInterfacesVisitor<'a, 'tc hir::ItemDefaultImpl(..) 
=> {} // An inherent impl is public when its type is public // Subitems of inherent impls have their own publicity - hir::ItemImpl(_, _, ref generics, None, ref ty, ref impl_items) => { + hir::ItemImpl(.., ref generics, None, ref ty, ref impl_items) => { let ty_vis = self.ty_visibility(ty); check.required_visibility = ty_vis; check.visit_generics(generics); @@ -1067,7 +1113,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivateItemsInPublicInterfacesVisitor<'a, 'tc } // A trait impl is public when both its type and its trait are public // Subitems of trait impls have inherited publicity - hir::ItemImpl(_, _, ref generics, Some(ref trait_ref), ref ty, ref impl_items) => { + hir::ItemImpl(.., ref generics, Some(ref trait_ref), ref ty, ref impl_items) => { let vis = min(self.ty_visibility(ty), self.trait_ref_visibility(trait_ref)); check.required_visibility = vis; check.visit_generics(generics); diff --git a/src/librustc_resolve/assign_ids.rs b/src/librustc_resolve/assign_ids.rs deleted file mode 100644 index 70e566de8a..0000000000 --- a/src/librustc_resolve/assign_ids.rs +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use Resolver; -use rustc::session::Session; -use syntax::ast; -use syntax::ext::hygiene::Mark; -use syntax::fold::{self, Folder}; -use syntax::ptr::P; -use syntax::util::move_map::MoveMap; -use syntax::util::small_vector::SmallVector; - -use std::collections::HashMap; -use std::mem; - -impl<'a> Resolver<'a> { - pub fn assign_node_ids(&mut self, krate: ast::Crate) -> ast::Crate { - NodeIdAssigner { - sess: self.session, - macros_at_scope: &mut self.macros_at_scope, - }.fold_crate(krate) - } -} - -struct NodeIdAssigner<'a> { - sess: &'a Session, - macros_at_scope: &'a mut HashMap>, -} - -impl<'a> Folder for NodeIdAssigner<'a> { - fn new_id(&mut self, old_id: ast::NodeId) -> ast::NodeId { - assert_eq!(old_id, ast::DUMMY_NODE_ID); - self.sess.next_node_id() - } - - fn fold_block(&mut self, block: P) -> P { - block.map(|mut block| { - block.id = self.new_id(block.id); - - let stmt = block.stmts.pop(); - let mut macros = Vec::new(); - block.stmts = block.stmts.move_flat_map(|stmt| { - if let ast::StmtKind::Item(ref item) = stmt.node { - if let ast::ItemKind::Mac(..) = item.node { - macros.push(item.ident.ctxt.data().outer_mark); - return None; - } - } - - let stmt = self.fold_stmt(stmt).pop().unwrap(); - if !macros.is_empty() { - self.macros_at_scope.insert(stmt.id, mem::replace(&mut macros, Vec::new())); - } - Some(stmt) - }); - - stmt.and_then(|mut stmt| { - // Avoid wasting a node id on a trailing expression statement, - // which shares a HIR node with the expression itself. - if let ast::StmtKind::Expr(expr) = stmt.node { - let expr = self.fold_expr(expr); - stmt.id = expr.id; - stmt.node = ast::StmtKind::Expr(expr); - Some(stmt) - } else { - self.fold_stmt(stmt).pop() - } - }).map(|stmt| { - if !macros.is_empty() { - self.macros_at_scope.insert(stmt.id, mem::replace(&mut macros, Vec::new())); - } - block.stmts.push(stmt); - }); - - block - }) - } - - fn fold_item(&mut self, item: P) -> SmallVector> { - match item.node { - ast::ItemKind::Mac(..) 
=> SmallVector::zero(), - _ => fold::noop_fold_item(item, self), - } - } -} diff --git a/src/librustc_resolve/build_reduced_graph.rs b/src/librustc_resolve/build_reduced_graph.rs index 116c1b7a6d..9202f8c094 100644 --- a/src/librustc_resolve/build_reduced_graph.rs +++ b/src/librustc_resolve/build_reduced_graph.rs @@ -14,17 +14,18 @@ //! any imports resolved. use resolve_imports::ImportDirectiveSubclass::{self, GlobImport}; -use Module; +use {Module, ModuleS, ModuleKind}; use Namespace::{self, TypeNS, ValueNS}; use {NameBinding, NameBindingKind, ToNameBinding}; -use ParentLink::{ModuleParentLink, BlockParentLink}; use Resolver; use {resolve_error, resolve_struct_error, ResolutionError}; -use rustc::middle::cstore::{ChildItem, DlDef}; use rustc::hir::def::*; use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId}; -use rustc::ty::{self, VariantKind}; +use rustc::hir::map::DefPathData; +use rustc::ty; + +use std::cell::Cell; use syntax::ast::Name; use syntax::attr; @@ -32,9 +33,9 @@ use syntax::parse::token; use syntax::ast::{Block, Crate}; use syntax::ast::{ForeignItem, ForeignItemKind, Item, ItemKind}; -use syntax::ast::{Mutability, PathListItemKind}; -use syntax::ast::{StmtKind, TraitItemKind}; +use syntax::ast::{Mutability, StmtKind, TraitItem, TraitItemKind}; use syntax::ast::{Variant, ViewPathGlob, ViewPathList, ViewPathSimple}; +use syntax::parse::token::keywords; use syntax::visit::{self, Visitor}; use syntax_pos::{Span, DUMMY_SP}; @@ -54,14 +55,7 @@ impl<'a> ToNameBinding<'a> for (Def, Span, ty::Visibility) { impl<'b> Resolver<'b> { /// Constructs the reduced graph for the entire crate. pub fn build_reduced_graph(&mut self, krate: &Crate) { - let no_implicit_prelude = attr::contains_name(&krate.attrs, "no_implicit_prelude"); - self.graph_root.no_implicit_prelude.set(no_implicit_prelude); - - let mut visitor = BuildReducedGraphVisitor { - parent: self.graph_root, - resolver: self, - }; - visit::walk_crate(&mut visitor, krate); + visit::walk_crate(&mut BuildReducedGraphVisitor { resolver: self }, krate); } /// Defines `name` in namespace `ns` of module `parent` to be `def` if it is not yet defined; @@ -84,11 +78,12 @@ impl<'b> Resolver<'b> { } /// Constructs the reduced graph for one item. - fn build_reduced_graph_for_item(&mut self, item: &Item, parent_ref: &mut Module<'b>) { - let parent = *parent_ref; + fn build_reduced_graph_for_item(&mut self, item: &Item) { + self.crate_loader.process_item(item, &self.definitions); + + let parent = self.current_module; let name = item.ident.name; let sp = item.span; - self.current_module = parent; let vis = self.resolve_visibility(&item.vis); match item.node { @@ -130,15 +125,15 @@ impl<'b> Resolver<'b> { let subclass = ImportDirectiveSubclass::single(binding.name, source_name); let span = view_path.span; - parent.add_import_directive(module_path, subclass, span, item.id, vis); - self.unresolved_imports += 1; + self.add_import_directive(module_path, subclass, span, item.id, vis); } ViewPathList(_, ref source_items) => { // Make sure there's at most one `mod` import in the list. let mod_spans = source_items.iter().filter_map(|item| { - match item.node { - PathListItemKind::Mod { .. } => Some(item.span), - _ => None, + if item.node.name.name == keywords::SelfValue.name() { + Some(item.span) + } else { + None } }).collect::>(); @@ -153,10 +148,12 @@ impl<'b> Resolver<'b> { } for source_item in source_items { - let (module_path, name, rename) = match source_item.node { - PathListItemKind::Ident { name, rename, .. 
} => - (module_path.clone(), name.name, rename.unwrap_or(name).name), - PathListItemKind::Mod { rename, .. } => { + let node = source_item.node; + let (module_path, name, rename) = { + if node.name.name != keywords::SelfValue.name() { + let rename = node.rename.unwrap_or(node.name).name; + (module_path.clone(), node.name.name, rename) + } else { let name = match module_path.last() { Some(name) => *name, None => { @@ -170,21 +167,22 @@ impl<'b> Resolver<'b> { } }; let module_path = module_path.split_last().unwrap().1; - let rename = rename.map(|i| i.name).unwrap_or(name); + let rename = node.rename.map(|i| i.name).unwrap_or(name); (module_path.to_vec(), name, rename) } }; let subclass = ImportDirectiveSubclass::single(rename, name); - let (span, id) = (source_item.span, source_item.node.id()); - parent.add_import_directive(module_path, subclass, span, id, vis); - self.unresolved_imports += 1; + let (span, id) = (source_item.span, source_item.node.id); + self.add_import_directive(module_path, subclass, span, id, vis); } } ViewPathGlob(_) => { - let subclass = GlobImport { is_prelude: is_prelude }; + let subclass = GlobImport { + is_prelude: is_prelude, + max_vis: Cell::new(ty::Visibility::PrivateExternal), + }; let span = view_path.span; - parent.add_import_directive(module_path, subclass, span, item.id, vis); - self.unresolved_imports += 1; + self.add_import_directive(module_path, subclass, span, item.id, vis); } } } @@ -197,26 +195,31 @@ impl<'b> Resolver<'b> { krate: crate_id, index: CRATE_DEF_INDEX, }; - let parent_link = ModuleParentLink(parent, name); - let def = Def::Mod(def_id); - let module = self.new_extern_crate_module(parent_link, def, item.id); + let module = self.arenas.alloc_module(ModuleS { + extern_crate_id: Some(item.id), + populated: Cell::new(false), + ..ModuleS::new(Some(parent), ModuleKind::Def(Def::Mod(def_id), name)) + }); self.define(parent, name, TypeNS, (module, sp, vis)); - self.build_reduced_graph_for_external_crate(module); + self.populate_module_if_necessary(module); } } ItemKind::Mod(..) => { - let parent_link = ModuleParentLink(parent, name); let def = Def::Mod(self.definitions.local_def_id(item.id)); - let module = self.new_module(parent_link, Some(def), false); - module.no_implicit_prelude.set({ - parent.no_implicit_prelude.get() || + let module = self.arenas.alloc_module(ModuleS { + no_implicit_prelude: parent.no_implicit_prelude || { attr::contains_name(&item.attrs, "no_implicit_prelude") + }, + normal_ancestor_id: Some(item.id), + ..ModuleS::new(Some(parent), ModuleKind::Def(def, name)) }); self.define(parent, name, TypeNS, (module, sp, vis)); self.module_map.insert(item.id, module); - *parent_ref = module; + + // Descend into the module. + self.current_module = module; } ItemKind::ForeignMod(..) => {} @@ -227,11 +230,11 @@ impl<'b> Resolver<'b> { let def = Def::Static(self.definitions.local_def_id(item.id), mutbl); self.define(parent, name, ValueNS, (def, sp, vis)); } - ItemKind::Const(_, _) => { + ItemKind::Const(..) => { let def = Def::Const(self.definitions.local_def_id(item.id)); self.define(parent, name, ValueNS, (def, sp, vis)); } - ItemKind::Fn(_, _, _, _, _, _) => { + ItemKind::Fn(..) 
=> { let def = Def::Fn(self.definitions.local_def_id(item.id)); self.define(parent, name, ValueNS, (def, sp, vis)); } @@ -243,14 +246,12 @@ impl<'b> Resolver<'b> { } ItemKind::Enum(ref enum_definition, _) => { - let parent_link = ModuleParentLink(parent, name); let def = Def::Enum(self.definitions.local_def_id(item.id)); - let module = self.new_module(parent_link, Some(def), false); + let module = self.new_module(parent, ModuleKind::Def(def, name), true); self.define(parent, name, TypeNS, (module, sp, vis)); for variant in &(*enum_definition).variants { - let item_def_id = self.definitions.local_def_id(item.id); - self.build_reduced_graph_for_variant(variant, item_def_id, module, vis); + self.build_reduced_graph_for_variant(variant, module, vis); } } @@ -260,8 +261,8 @@ impl<'b> Resolver<'b> { let def = Def::Struct(self.definitions.local_def_id(item.id)); self.define(parent, name, TypeNS, (def, sp, vis)); - // If this is a newtype or unit-like struct, define a name - // in the value namespace as well + // If this is a tuple or unit struct, define a name + // in the value namespace as well. if !struct_def.is_struct() { let def = Def::Struct(self.definitions.local_def_id(struct_def.id())); self.define(parent, name, ValueNS, (def, sp, vis)); @@ -277,35 +278,30 @@ impl<'b> Resolver<'b> { self.structs.insert(item_def_id, field_names); } - ItemKind::DefaultImpl(_, _) | ItemKind::Impl(..) => {} + ItemKind::Union(ref vdata, _) => { + let def = Def::Union(self.definitions.local_def_id(item.id)); + self.define(parent, name, TypeNS, (def, sp, vis)); - ItemKind::Trait(_, _, _, ref items) => { - let def_id = self.definitions.local_def_id(item.id); + // Record the def ID and fields of this union. + let field_names = vdata.fields().iter().enumerate().map(|(index, field)| { + self.resolve_visibility(&field.vis); + field.ident.map(|ident| ident.name) + .unwrap_or_else(|| token::intern(&index.to_string())) + }).collect(); + let item_def_id = self.definitions.local_def_id(item.id); + self.structs.insert(item_def_id, field_names); + } - // Add all the items within to a new module. - let parent_link = ModuleParentLink(parent, name); - let def = Def::Trait(def_id); - let module_parent = self.new_module(parent_link, Some(def), false); - self.define(parent, name, TypeNS, (module_parent, sp, vis)); - - // Add the names of all the items to the trait info. - for item in items { - let item_def_id = self.definitions.local_def_id(item.id); - let mut is_static_method = false; - let (def, ns) = match item.node { - TraitItemKind::Const(..) => (Def::AssociatedConst(item_def_id), ValueNS), - TraitItemKind::Method(ref sig, _) => { - is_static_method = !sig.decl.has_self(); - (Def::Method(item_def_id), ValueNS) - } - TraitItemKind::Type(..) => (Def::AssociatedTy(def_id, item_def_id), TypeNS), - TraitItemKind::Macro(_) => panic!("unexpanded macro in resolve!"), - }; + ItemKind::DefaultImpl(..) | ItemKind::Impl(..) => {} - self.define(module_parent, item.ident.name, ns, (def, item.span, vis)); + ItemKind::Trait(..) => { + let def_id = self.definitions.local_def_id(item.id); - self.trait_item_map.insert((item.ident.name, def_id), is_static_method); - } + // Add all the items within to a new module. + let module = + self.new_module(parent, ModuleKind::Def(Def::Trait(def_id), name), true); + self.define(parent, name, TypeNS, (module, sp, vis)); + self.current_module = module; } ItemKind::Mac(_) => panic!("unexpanded macro in resolve!"), } @@ -315,7 +311,6 @@ impl<'b> Resolver<'b> { // type and value namespaces. 
fn build_reduced_graph_for_variant(&mut self, variant: &Variant, - item_id: DefId, parent: Module<'b>, vis: ty::Visibility) { let name = variant.node.name.name; @@ -327,15 +322,14 @@ impl<'b> Resolver<'b> { // Variants are always treated as importable to allow them to be glob used. // All variants are defined in both type and value namespaces as future-proofing. - let def = Def::Variant(item_id, self.definitions.local_def_id(variant.node.data.id())); + let def = Def::Variant(self.definitions.local_def_id(variant.node.data.id())); self.define(parent, name, ValueNS, (def, variant.span, vis)); self.define(parent, name, TypeNS, (def, variant.span, vis)); } /// Constructs the reduced graph for one foreign item. - fn build_reduced_graph_for_foreign_item(&mut self, - foreign_item: &ForeignItem, - parent: Module<'b>) { + fn build_reduced_graph_for_foreign_item(&mut self, foreign_item: &ForeignItem) { + let parent = self.current_module; let name = foreign_item.ident.name; let def = match foreign_item.node { @@ -346,12 +340,12 @@ impl<'b> Resolver<'b> { Def::Static(self.definitions.local_def_id(foreign_item.id), m) } }; - self.current_module = parent; let vis = self.resolve_visibility(&foreign_item.vis); self.define(parent, name, ValueNS, (def, foreign_item.span, vis)); } - fn build_reduced_graph_for_block(&mut self, block: &Block, parent: &mut Module<'b>) { + fn build_reduced_graph_for_block(&mut self, block: &Block) { + let parent = self.current_module; if self.block_needs_anonymous_module(block) { let block_id = block.id; @@ -359,46 +353,44 @@ impl<'b> Resolver<'b> { {}", block_id); - let parent_link = BlockParentLink(parent, block_id); - let new_module = self.new_module(parent_link, None, false); + let new_module = self.new_module(parent, ModuleKind::Block(block_id), true); self.module_map.insert(block_id, new_module); - *parent = new_module; + self.current_module = new_module; // Descend into the block. } } /// Builds the reduced graph for a single item in an external crate. - fn build_reduced_graph_for_external_crate_def(&mut self, parent: Module<'b>, xcdef: ChildItem) { - let def = match xcdef.def { - DlDef(def) => def, - _ => return, - }; - - if let Def::ForeignMod(def_id) = def { - // Foreign modules have no names. Recur and populate eagerly. - for child in self.session.cstore.item_children(def_id) { - self.build_reduced_graph_for_external_crate_def(parent, child); - } + fn build_reduced_graph_for_external_crate_def(&mut self, parent: Module<'b>, + child: Export) { + let def_id = child.def_id; + let name = child.name; + + let def = if let Some(def) = self.session.cstore.describe_def(def_id) { + def + } else { return; - } + }; - let name = xcdef.name; - let vis = if parent.is_trait() { ty::Visibility::Public } else { xcdef.vis }; + let vis = if parent.is_trait() { + ty::Visibility::Public + } else { + self.session.cstore.visibility(def_id) + }; match def { - Def::Mod(_) | Def::ForeignMod(_) | Def::Enum(..) => { + Def::Mod(_) | Def::Enum(..) 
=> { debug!("(building reduced graph for external crate) building module {} {:?}", name, vis); - let parent_link = ModuleParentLink(parent, name); - let module = self.new_module(parent_link, Some(def), true); + let module = self.new_module(parent, ModuleKind::Def(def, name), false); let _ = self.try_define(parent, name, TypeNS, (module, DUMMY_SP, vis)); } - Def::Variant(_, variant_id) => { + Def::Variant(variant_id) => { debug!("(building reduced graph for external crate) building variant {}", name); // Variants are always treated as importable to allow them to be glob used. // All variants are defined in both type and value namespaces as future-proofing. let _ = self.try_define(parent, name, TypeNS, (def, DUMMY_SP, vis)); let _ = self.try_define(parent, name, ValueNS, (def, DUMMY_SP, vis)); - if self.session.cstore.variant_kind(variant_id) == Some(VariantKind::Struct) { + if self.session.cstore.variant_kind(variant_id) == Some(ty::VariantKind::Struct) { // Not adding fields for variants as they are not accessed with a self receiver self.structs.insert(variant_id, Vec::new()); } @@ -412,16 +404,18 @@ impl<'b> Resolver<'b> { name); let _ = self.try_define(parent, name, ValueNS, (def, DUMMY_SP, vis)); } - Def::Trait(def_id) => { + Def::Trait(_) => { debug!("(building reduced graph for external crate) building type {}", name); // If this is a trait, add all the trait item names to the trait // info. - let trait_item_def_ids = self.session.cstore.trait_item_def_ids(def_id); - for trait_item_def in &trait_item_def_ids { + let trait_item_def_ids = self.session.cstore.impl_or_trait_items(def_id); + for &trait_item_def in &trait_item_def_ids { let trait_item_name = - self.session.cstore.item_name(trait_item_def.def_id()); + self.session.cstore.def_key(trait_item_def) + .disambiguated_data.data.get_opt_name() + .expect("opt_item_name returned None for trait"); debug!("(building reduced graph for external crate) ... adding trait item \ '{}'", @@ -430,16 +424,17 @@ impl<'b> Resolver<'b> { self.trait_item_map.insert((trait_item_name, def_id), false); } - let parent_link = ModuleParentLink(parent, name); - let module = self.new_module(parent_link, Some(def), true); + let module = self.new_module(parent, ModuleKind::Def(def, name), false); let _ = self.try_define(parent, name, TypeNS, (module, DUMMY_SP, vis)); } Def::TyAlias(..) | Def::AssociatedTy(..) => { debug!("(building reduced graph for external crate) building type {}", name); let _ = self.try_define(parent, name, TypeNS, (def, DUMMY_SP, vis)); } - Def::Struct(def_id) - if self.session.cstore.tuple_struct_definition_if_ctor(def_id).is_none() => { + Def::Struct(_) + if self.session.cstore.def_key(def_id).disambiguated_data.data != + DefPathData::StructCtor + => { debug!("(building reduced graph for external crate) building type and value for {}", name); let _ = self.try_define(parent, name, TypeNS, (def, DUMMY_SP, vis)); @@ -452,6 +447,13 @@ impl<'b> Resolver<'b> { let fields = self.session.cstore.struct_field_names(def_id); self.structs.insert(def_id, fields); } + Def::Union(_) => { + let _ = self.try_define(parent, name, TypeNS, (def, DUMMY_SP, vis)); + + // Record the def ID and fields of this union. + let fields = self.session.cstore.struct_field_names(def_id); + self.structs.insert(def_id, fields); + } Def::Struct(..) => {} Def::Local(..) | Def::PrimTy(..) | @@ -465,15 +467,6 @@ impl<'b> Resolver<'b> { } } - /// Builds the reduced graph rooted at the 'use' directive for an external - /// crate. 
- fn build_reduced_graph_for_external_crate(&mut self, root: Module<'b>) { - let root_cnum = root.def_id().unwrap().krate; - for child in self.session.cstore.crate_top_level_items(root_cnum) { - self.build_reduced_graph_for_external_crate_def(root, child); - } - } - /// Ensures that the reduced graph rooted at the given external module /// is built, building it if it is not. pub fn populate_module_if_necessary(&mut self, module: Module<'b>) { @@ -487,25 +480,51 @@ impl<'b> Resolver<'b> { struct BuildReducedGraphVisitor<'a, 'b: 'a> { resolver: &'a mut Resolver<'b>, - parent: Module<'b>, } impl<'a, 'b> Visitor for BuildReducedGraphVisitor<'a, 'b> { fn visit_item(&mut self, item: &Item) { - let old_parent = self.parent; - self.resolver.build_reduced_graph_for_item(item, &mut self.parent); + let parent = self.resolver.current_module; + self.resolver.build_reduced_graph_for_item(item); visit::walk_item(self, item); - self.parent = old_parent; + self.resolver.current_module = parent; } fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) { - self.resolver.build_reduced_graph_for_foreign_item(foreign_item, &self.parent); + self.resolver.build_reduced_graph_for_foreign_item(foreign_item); } fn visit_block(&mut self, block: &Block) { - let old_parent = self.parent; - self.resolver.build_reduced_graph_for_block(block, &mut self.parent); + let parent = self.resolver.current_module; + self.resolver.build_reduced_graph_for_block(block); visit::walk_block(self, block); - self.parent = old_parent; + self.resolver.current_module = parent; + } + + fn visit_trait_item(&mut self, item: &TraitItem) { + let parent = self.resolver.current_module; + let def_id = parent.def_id().unwrap(); + + // Add the item to the trait info. + let item_def_id = self.resolver.definitions.local_def_id(item.id); + let mut is_static_method = false; + let (def, ns) = match item.node { + TraitItemKind::Const(..) => (Def::AssociatedConst(item_def_id), ValueNS), + TraitItemKind::Method(ref sig, _) => { + is_static_method = !sig.decl.has_self(); + (Def::Method(item_def_id), ValueNS) + } + TraitItemKind::Type(..) => (Def::AssociatedTy(item_def_id), TypeNS), + TraitItemKind::Macro(_) => panic!("unexpanded macro in resolve!"), + }; + + self.resolver.trait_item_map.insert((item.ident.name, def_id), is_static_method); + + let vis = ty::Visibility::Public; + self.resolver.define(parent, item.ident.name, ns, (def, item.span, vis)); + + self.resolver.current_module = parent.parent.unwrap(); // nearest normal ancestor + visit::walk_trait_item(self, item); + self.resolver.current_module = parent; } } diff --git a/src/librustc_resolve/check_unused.rs b/src/librustc_resolve/check_unused.rs index 3084d9abbe..93abe07128 100644 --- a/src/librustc_resolve/check_unused.rs +++ b/src/librustc_resolve/check_unused.rs @@ -95,13 +95,13 @@ impl<'a, 'b> Visitor for UnusedImportCheckVisitor<'a, 'b> { } ast::ItemKind::Use(ref p) => { match p.node { - ViewPathSimple(_, _) => { + ViewPathSimple(..) => { self.check_import(item.id, p.span) } ViewPathList(_, ref list) => { for i in list { - self.check_import(i.node.id(), i.span); + self.check_import(i.node.id, i.span); } } ViewPathGlob(_) => { diff --git a/src/librustc_resolve/diagnostics.rs b/src/librustc_resolve/diagnostics.rs index 11ef75ee6a..f8f90bdb4e 100644 --- a/src/librustc_resolve/diagnostics.rs +++ b/src/librustc_resolve/diagnostics.rs @@ -891,7 +891,7 @@ A `struct` variant name was used like a function name. 
Erroneous code example: ```compile_fail,E0423 -struct Foo { a: bool}; +struct Foo { a: bool }; let f = Foo(); // error: `Foo` is a struct variant name, but this expression uses @@ -1270,7 +1270,42 @@ trait Foo {} impl Foo for i32 {} ``` -"## +"##, + +E0530: r##" +A binding shadowed something it shouldn't. + +Erroneous code example: + +```compile_fail,E0530 +static TEST: i32 = 0; + +let r: (i32, i32) = (0, 0); +match r { + TEST => {} // error: match bindings cannot shadow statics +} +``` + +To fix this error, just change the binding's name in order to avoid shadowing +one of the following: + +* struct name +* struct/enum variant +* static +* const +* associated const + +Fixed example: + +``` +static TEST: i32 = 0; + +let r: (i32, i32) = (0, 0); +match r { + something => {} // ok! +} +``` +"##, } @@ -1289,7 +1324,6 @@ register_diagnostics! { // E0419, merged into 531 // E0420, merged into 532 // E0421, merged into 531 - E0530, // X bindings cannot shadow Ys E0531, // unresolved pattern path kind `name` E0532, // expected pattern path kind, found another pattern path kind // E0427, merged into 530 diff --git a/src/librustc_resolve/lib.rs b/src/librustc_resolve/lib.rs index 962509be32..798614ce58 100644 --- a/src/librustc_resolve/lib.rs +++ b/src/librustc_resolve/lib.rs @@ -19,6 +19,7 @@ #![feature(associated_consts)] #![feature(borrow_state)] +#![feature(dotdot_in_tuple_patterns)] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] #![feature(staged_api)] @@ -40,37 +41,38 @@ use self::TypeParameters::*; use self::RibKind::*; use self::UseLexicalScopeFlag::*; use self::ModulePrefixResult::*; -use self::ParentLink::*; use rustc::hir::map::Definitions; use rustc::hir::{self, PrimTy, TyBool, TyChar, TyFloat, TyInt, TyUint, TyStr}; +use rustc::middle::cstore::CrateLoader; use rustc::session::Session; use rustc::lint; use rustc::hir::def::*; -use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId}; +use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId}; use rustc::ty; -use rustc::ty::subst::{ParamSpace, FnSpace, TypeSpace}; use rustc::hir::{Freevar, FreevarMap, TraitCandidate, TraitMap, GlobMap}; use rustc::util::nodemap::{NodeMap, NodeSet, FnvHashMap, FnvHashSet}; +use syntax::ext::base::MultiItemModifier; use syntax::ext::hygiene::Mark; use syntax::ast::{self, FloatTy}; -use syntax::ast::{CRATE_NODE_ID, Name, NodeId, CrateNum, IntTy, UintTy}; +use syntax::ast::{CRATE_NODE_ID, Name, NodeId, SpannedIdent, IntTy, UintTy}; use syntax::parse::token::{self, keywords}; use syntax::util::lev_distance::find_best_match_for_name; use syntax::visit::{self, FnKind, Visitor}; +use syntax::attr; use syntax::ast::{Arm, BindingMode, Block, Crate, Expr, ExprKind}; use syntax::ast::{FnDecl, ForeignItem, ForeignItemKind, Generics}; use syntax::ast::{Item, ItemKind, ImplItem, ImplItemKind}; use syntax::ast::{Local, Mutability, Pat, PatKind, Path}; use syntax::ast::{PathSegment, PathParameters, QSelf, TraitItemKind, TraitRef, Ty, TyKind}; -use syntax_pos::Span; +use syntax_pos::{Span, DUMMY_SP}; use errors::DiagnosticBuilder; -use std::collections::{HashMap, HashSet}; use std::cell::{Cell, RefCell}; +use std::rc::Rc; use std::fmt; use std::mem::replace; @@ -80,10 +82,10 @@ use resolve_imports::{ImportDirective, NameResolution}; // registered before they are used. 
mod diagnostics; +mod macros; mod check_unused; mod build_reduced_graph; mod resolve_imports; -mod assign_ids; enum SuggestionType { Macro(String), @@ -103,7 +105,7 @@ enum ResolutionError<'a> { /// error E0402: cannot use an outer type parameter in this context OuterTypeParameterContext, /// error E0403: the name is already used for a type parameter in this type parameter list - NameAlreadyUsedInTypeParameterList(Name), + NameAlreadyUsedInTypeParameterList(Name, &'a Span), /// error E0404: is not a trait IsNotATrait(&'a str), /// error E0405: use of undeclared trait name @@ -117,7 +119,7 @@ enum ResolutionError<'a> { /// error E0408: variable `{}` from pattern #{} is not bound in pattern #{} VariableNotBoundInPattern(Name, usize, usize), /// error E0409: variable is bound with different mode in pattern #{} than in pattern #1 - VariableBoundWithDifferentMode(Name, usize), + VariableBoundWithDifferentMode(Name, usize, Span), /// error E0411: use of `Self` outside of an impl or trait SelfUsedOutsideImplOrTrait, /// error E0412: use of undeclared @@ -210,13 +212,17 @@ fn resolve_struct_error<'b, 'a: 'b, 'c>(resolver: &'b Resolver<'a>, E0402, "cannot use an outer type parameter in this context") } - ResolutionError::NameAlreadyUsedInTypeParameterList(name) => { - struct_span_err!(resolver.session, - span, - E0403, - "the name `{}` is already used for a type parameter in this type \ - parameter list", - name) + ResolutionError::NameAlreadyUsedInTypeParameterList(name, first_use_span) => { + let mut err = struct_span_err!(resolver.session, + span, + E0403, + "the name `{}` is already used for a type parameter \ + in this type parameter list", + name); + err.span_label(span, &format!("already used")); + err.span_label(first_use_span.clone(), &format!("first use of `{}`", name)); + err + } ResolutionError::IsNotATrait(name) => { let mut err = struct_span_err!(resolver.session, @@ -238,28 +244,34 @@ fn resolve_struct_error<'b, 'a: 'b, 'c>(resolver: &'b Resolver<'a>, err } ResolutionError::MethodNotMemberOfTrait(method, trait_) => { - struct_span_err!(resolver.session, - span, - E0407, - "method `{}` is not a member of trait `{}`", - method, - trait_) + let mut err = struct_span_err!(resolver.session, + span, + E0407, + "method `{}` is not a member of trait `{}`", + method, + trait_); + err.span_label(span, &format!("not a member of trait `{}`", trait_)); + err } ResolutionError::TypeNotMemberOfTrait(type_, trait_) => { - struct_span_err!(resolver.session, + let mut err = struct_span_err!(resolver.session, span, E0437, "type `{}` is not a member of trait `{}`", type_, - trait_) + trait_); + err.span_label(span, &format!("not a member of trait `{}`", trait_)); + err } ResolutionError::ConstNotMemberOfTrait(const_, trait_) => { - struct_span_err!(resolver.session, + let mut err = struct_span_err!(resolver.session, span, E0438, "const `{}` is not a member of trait `{}`", const_, - trait_) + trait_); + err.span_label(span, &format!("not a member of trait `{}`", trait_)); + err } ResolutionError::VariableNotBoundInPattern(variable_name, from, to) => { struct_span_err!(resolver.session, @@ -270,14 +282,19 @@ fn resolve_struct_error<'b, 'a: 'b, 'c>(resolver: &'b Resolver<'a>, from, to) } - ResolutionError::VariableBoundWithDifferentMode(variable_name, pattern_number) => { - struct_span_err!(resolver.session, + ResolutionError::VariableBoundWithDifferentMode(variable_name, + pattern_number, + first_binding_span) => { + let mut err = struct_span_err!(resolver.session, span, E0409, "variable `{}` is bound 
with different mode in pattern #{} than in \ pattern #1", variable_name, - pattern_number) + pattern_number); + err.span_label(span, &format!("bound in different ways")); + err.span_label(first_binding_span, &format!("first binding")); + err } ResolutionError::SelfUsedOutsideImplOrTrait => { let mut err = struct_span_err!(resolver.session, @@ -317,35 +334,46 @@ fn resolve_struct_error<'b, 'a: 'b, 'c>(resolver: &'b Resolver<'a>, err } ResolutionError::DoesNotNameAStruct(name) => { - struct_span_err!(resolver.session, + let mut err = struct_span_err!(resolver.session, span, E0422, "`{}` does not name a structure", - name) + name); + err.span_label(span, &format!("not a structure")); + err } ResolutionError::StructVariantUsedAsFunction(path_name) => { - struct_span_err!(resolver.session, + let mut err = struct_span_err!(resolver.session, span, E0423, "`{}` is the name of a struct or struct variant, but this expression \ uses it like a function name", - path_name) + path_name); + err.span_label(span, &format!("struct called like a function")); + err } ResolutionError::SelfNotAvailableInStaticMethod => { - struct_span_err!(resolver.session, + let mut err = struct_span_err!(resolver.session, span, E0424, - "`self` is not available in a static method. Maybe a `self` \ - argument is missing?") + "`self` is not available in a static method"); + err.span_label(span, &format!("not available in static method")); + err.note(&format!("maybe a `self` argument is missing?")); + err } ResolutionError::UnresolvedName { path, message: msg, context, is_static_method, is_field, def } => { let mut err = struct_span_err!(resolver.session, span, E0425, - "unresolved name `{}`{}", - path, - msg); + "unresolved name `{}`", + path); + if msg != "" { + err.span_label(span, &msg); + } else { + err.span_label(span, &format!("unresolved name")); + } + match context { UnresolvedNameContext::Other => { if msg.is_empty() && is_static_method && is_field { @@ -361,7 +389,7 @@ fn resolve_struct_error<'b, 'a: 'b, 'c>(resolver: &'b Resolver<'a>, module = path, ident = ident.node) } - Some(&ExprKind::MethodCall(ident, _, _)) => { + Some(&ExprKind::MethodCall(ident, ..)) => { format!("to call a function from the `{module}` module, \ use `{module}::{ident}(..)`", module = path, @@ -378,11 +406,13 @@ fn resolve_struct_error<'b, 'a: 'b, 'c>(resolver: &'b Resolver<'a>, err } ResolutionError::UndeclaredLabel(name) => { - struct_span_err!(resolver.session, - span, - E0426, - "use of undeclared label `{}`", - name) + let mut err = struct_span_err!(resolver.session, + span, + E0426, + "use of undeclared label `{}`", + name); + err.span_label(span, &format!("undeclared label `{}`",&name)); + err } ResolutionError::SelfImportsOnlyAllowedWithin => { struct_span_err!(resolver.session, @@ -406,10 +436,14 @@ fn resolve_struct_error<'b, 'a: 'b, 'c>(resolver: &'b Resolver<'a>, } ResolutionError::UnresolvedImport(name) => { let msg = match name { - Some((n, p)) => format!("unresolved import `{}`{}", n, p), + Some((n, _)) => format!("unresolved import `{}`", n), None => "unresolved import".to_owned(), }; - struct_span_err!(resolver.session, span, E0432, "{}", msg) + let mut err = struct_span_err!(resolver.session, span, E0432, "{}", msg); + if let Some((_, p)) = name { + err.span_label(span, &p); + } + err } ResolutionError::FailedToResolve(msg) => { let mut err = struct_span_err!(resolver.session, span, E0433, @@ -426,13 +460,15 @@ fn resolve_struct_error<'b, 'a: 'b, 'c>(resolver: &'b Resolver<'a>, closure form instead") } 
ResolutionError::AttemptToUseNonConstantValueInConstant => { - struct_span_err!(resolver.session, + let mut err = struct_span_err!(resolver.session, span, E0435, - "attempt to use a non-constant value in a constant") + "attempt to use a non-constant value in a constant"); + err.span_label(span, &format!("non-constant used with constant")); + err } ResolutionError::BindingShadowsSomethingUnacceptable(what_binding, name, binding) => { - let shadows_what = PathResolution::new(binding.def().unwrap()).kind_name(); + let shadows_what = PathResolution::new(binding.def()).kind_name(); let mut err = struct_span_err!(resolver.session, span, E0530, @@ -470,7 +506,7 @@ struct BindingInfo { } // Map from the name in a pattern to its binding mode. -type BindingMap = HashMap; +type BindingMap = FnvHashMap; #[derive(Copy, Clone, PartialEq, Eq, Debug)] enum PatternSource { @@ -557,7 +593,7 @@ impl<'a> Visitor for Resolver<'a> { fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) { let type_parameters = match foreign_item.node { ForeignItemKind::Fn(_, ref generics) => { - HasTypeParameters(generics, FnSpace, ItemRibKind) + HasTypeParameters(generics, ItemRibKind) } ForeignItemKind::Static(..) => NoTypeParameters, }; @@ -572,7 +608,7 @@ impl<'a> Visitor for Resolver<'a> { _: Span, node_id: NodeId) { let rib_kind = match function_kind { - FnKind::ItemFn(_, generics, _, _, _, _) => { + FnKind::ItemFn(_, generics, ..) => { self.visit_generics(generics); ItemRibKind } @@ -625,10 +661,6 @@ enum TypeParameters<'a, 'b> { HasTypeParameters(// Type parameters. &'b Generics, - // Identifies the things that these parameters - // were declared on (type, fn, etc) - ParamSpace, - // The kind of the rib used for type parameters. RibKind<'a>), } @@ -679,14 +711,14 @@ enum ModulePrefixResult<'a> { /// One local scope. #[derive(Debug)] struct Rib<'a> { - bindings: HashMap, + bindings: FnvHashMap, kind: RibKind<'a>, } impl<'a> Rib<'a> { fn new(kind: RibKind<'a>) -> Rib<'a> { Rib { - bindings: HashMap::new(), + bindings: FnvHashMap(), kind: kind, } } @@ -716,7 +748,7 @@ impl<'a> LexicalScopeBinding<'a> { fn local_def(self) -> LocalDef { match self { LexicalScopeBinding::LocalDef(local_def) => local_def, - LexicalScopeBinding::Item(binding) => LocalDef::from_def(binding.def().unwrap()), + LexicalScopeBinding::Item(binding) => LocalDef::from_def(binding.def()), } } @@ -726,35 +758,30 @@ impl<'a> LexicalScopeBinding<'a> { _ => None, } } - - fn module(self) -> Option> { - self.item().and_then(NameBinding::module) - } } -/// The link from a module up to its nearest parent node. -#[derive(Clone,Debug)] -enum ParentLink<'a> { - NoParentLink, - ModuleParentLink(Module<'a>, Name), - BlockParentLink(Module<'a>, NodeId), +enum ModuleKind { + Block(NodeId), + Def(Def, Name), } /// One node in the tree of modules. pub struct ModuleS<'a> { - parent_link: ParentLink<'a>, - def: Option, + parent: Option>, + kind: ModuleKind, + + // The node id of the closest normal module (`mod`) ancestor (including this module). + normal_ancestor_id: Option, // If the module is an extern crate, `def` is root of the external crate and `extern_crate_id` // is the NodeId of the local `extern crate` item (otherwise, `extern_crate_id` is None). 
extern_crate_id: Option, - resolutions: RefCell>>>, - unresolved_imports: RefCell>>, + resolutions: RefCell>>>, - no_implicit_prelude: Cell, + no_implicit_prelude: bool, - glob_importers: RefCell, &'a ImportDirective<'a>)>>, + glob_importers: RefCell>>, globs: RefCell>>, // Used to memoize the traits in this module for faster searches through all traits in scope. @@ -764,29 +791,23 @@ pub struct ModuleS<'a> { // access the children must be preceded with a // `populate_module_if_necessary` call. populated: Cell, - - arenas: &'a ResolverArenas<'a>, } pub type Module<'a> = &'a ModuleS<'a>; impl<'a> ModuleS<'a> { - fn new(parent_link: ParentLink<'a>, - def: Option, - external: bool, - arenas: &'a ResolverArenas<'a>) -> Self { + fn new(parent: Option>, kind: ModuleKind) -> Self { ModuleS { - parent_link: parent_link, - def: def, + parent: parent, + kind: kind, + normal_ancestor_id: None, extern_crate_id: None, - resolutions: RefCell::new(HashMap::new()), - unresolved_imports: RefCell::new(Vec::new()), - no_implicit_prelude: Cell::new(false), + resolutions: RefCell::new(FnvHashMap()), + no_implicit_prelude: false, glob_importers: RefCell::new(Vec::new()), globs: RefCell::new((Vec::new())), traits: RefCell::new(None), - populated: Cell::new(!external), - arenas: arenas + populated: Cell::new(true), } } @@ -796,21 +817,28 @@ impl<'a> ModuleS<'a> { } } + fn def(&self) -> Option { + match self.kind { + ModuleKind::Def(def, _) => Some(def), + _ => None, + } + } + fn def_id(&self) -> Option { - self.def.as_ref().map(Def::def_id) + self.def().as_ref().map(Def::def_id) } // `self` resolves to the first module ancestor that `is_normal`. fn is_normal(&self) -> bool { - match self.def { - Some(Def::Mod(_)) => true, + match self.kind { + ModuleKind::Def(Def::Mod(_), _) => true, _ => false, } } fn is_trait(&self) -> bool { - match self.def { - Some(Def::Trait(_)) => true, + match self.kind { + ModuleKind::Def(Def::Trait(_), _) => true, _ => false, } } @@ -818,7 +846,7 @@ impl<'a> ModuleS<'a> { impl<'a> fmt::Debug for ModuleS<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self.def) + write!(f, "{:?}", self.def()) } } @@ -847,33 +875,43 @@ enum NameBindingKind<'a> { Import { binding: &'a NameBinding<'a>, directive: &'a ImportDirective<'a>, + used: Cell, }, + Ambiguity { + b1: &'a NameBinding<'a>, + b2: &'a NameBinding<'a>, + } } -#[derive(Clone, Debug)] struct PrivacyError<'a>(Span, Name, &'a NameBinding<'a>); +struct AmbiguityError<'a> { + span: Span, + name: Name, + b1: &'a NameBinding<'a>, + b2: &'a NameBinding<'a>, +} + impl<'a> NameBinding<'a> { - fn module(&self) -> Option> { + fn module(&self) -> Result, bool /* true if an error has already been reported */> { match self.kind { - NameBindingKind::Module(module) => Some(module), - NameBindingKind::Def(_) => None, + NameBindingKind::Module(module) => Ok(module), NameBindingKind::Import { binding, .. } => binding.module(), + NameBindingKind::Def(Def::Err) => Err(true), + NameBindingKind::Def(_) => Err(false), + NameBindingKind::Ambiguity { .. } => Err(false), } } - fn def(&self) -> Option { + fn def(&self) -> Def { match self.kind { - NameBindingKind::Def(def) => Some(def), - NameBindingKind::Module(module) => module.def, + NameBindingKind::Def(def) => def, + NameBindingKind::Module(module) => module.def().unwrap(), NameBindingKind::Import { binding, .. } => binding.def(), + NameBindingKind::Ambiguity { .. 
} => Def::Err, } } - fn is_pseudo_public(&self) -> bool { - self.pseudo_vis() == ty::Visibility::Public - } - // We sometimes need to treat variants as `pub` for backwards compatibility fn pseudo_vis(&self) -> ty::Visibility { if self.is_variant() { ty::Visibility::Public } else { self.vis } @@ -887,7 +925,7 @@ impl<'a> NameBinding<'a> { } fn is_extern_crate(&self) -> bool { - self.module().and_then(|module| module.extern_crate_id).is_some() + self.module().ok().and_then(|module| module.extern_crate_id).is_some() } fn is_import(&self) -> bool { @@ -900,12 +938,13 @@ impl<'a> NameBinding<'a> { fn is_glob_import(&self) -> bool { match self.kind { NameBindingKind::Import { directive, .. } => directive.is_glob(), + NameBindingKind::Ambiguity { .. } => true, _ => false, } } fn is_importable(&self) -> bool { - match self.def().unwrap() { + match self.def() { Def::AssociatedConst(..) | Def::Method(..) | Def::AssociatedTy(..) => false, _ => true, } @@ -914,12 +953,12 @@ impl<'a> NameBinding<'a> { /// Interns the names of the primitive types. struct PrimitiveTypeTable { - primitive_types: HashMap, + primitive_types: FnvHashMap, } impl PrimitiveTypeTable { fn new() -> PrimitiveTypeTable { - let mut table = PrimitiveTypeTable { primitive_types: HashMap::new() }; + let mut table = PrimitiveTypeTable { primitive_types: FnvHashMap() }; table.intern("bool", TyBool); table.intern("char", TyChar); @@ -953,7 +992,7 @@ pub struct Resolver<'a> { // Maps the node id of a statement to the expansions of the `macro_rules!`s // immediately above the statement (if appropriate). - macros_at_scope: HashMap>, + macros_at_scope: FnvHashMap>, graph_root: Module<'a>, @@ -963,8 +1002,11 @@ pub struct Resolver<'a> { structs: FnvHashMap>, - // The number of imports that are currently unresolved. - unresolved_imports: usize, + // All imports known to succeed or fail. + determined_imports: Vec<&'a ImportDirective<'a>>, + + // All non-determined imports. + indeterminate_imports: Vec<&'a ImportDirective<'a>>, // The module that represents the current item scope. current_module: Module<'a>, @@ -1020,13 +1062,24 @@ pub struct Resolver<'a> { // all imports, but only glob imports are actually interesting). pub glob_map: GlobMap, - used_imports: HashSet<(NodeId, Namespace)>, - used_crates: HashSet, + used_imports: FnvHashSet<(NodeId, Namespace)>, + used_crates: FnvHashSet, pub maybe_unused_trait_imports: NodeSet, privacy_errors: Vec>, + ambiguity_errors: Vec>, arenas: &'a ResolverArenas<'a>, + dummy_binding: &'a NameBinding<'a>, + new_import_semantics: bool, // true if `#![feature(item_like_imports)]` + + pub exported_macros: Vec, + pub derive_modes: FnvHashMap>, + crate_loader: &'a mut CrateLoader, + macro_names: FnvHashSet, + + // Maps the `Mark` of an expansion to its containing module or block. 
+ expansion_data: FnvHashMap, } pub struct ResolverArenas<'a> { @@ -1061,15 +1114,12 @@ impl<'a> ResolverArenas<'a> { } impl<'a> ty::NodeIdTree for Resolver<'a> { - fn is_descendant_of(&self, node: NodeId, ancestor: NodeId) -> bool { - let ancestor = self.definitions.local_def_id(ancestor); - let mut module = *self.module_map.get(&node).unwrap(); - while module.def_id() != Some(ancestor) { - let module_parent = match self.get_nearest_normal_module_parent(module) { - Some(parent) => parent, + fn is_descendant_of(&self, mut node: NodeId, ancestor: NodeId) -> bool { + while node != ancestor { + node = match self.module_map[&node].parent { + Some(parent) => parent.normal_ancestor_id.unwrap(), None => return false, - }; - module = module_parent; + } } true } @@ -1079,7 +1129,7 @@ impl<'a> hir::lowering::Resolver for Resolver<'a> { fn resolve_generated_global_path(&mut self, path: &hir::Path, is_value: bool) -> Def { let namespace = if is_value { ValueNS } else { TypeNS }; match self.resolve_crate_relative_path(path.span, &path.segments, namespace) { - Ok(binding) => binding.def().unwrap(), + Ok(binding) => binding.def(), Err(true) => Def::Err, Err(false) => { let path_name = &format!("{}", path); @@ -1106,8 +1156,8 @@ impl<'a> hir::lowering::Resolver for Resolver<'a> { self.def_map.insert(id, PathResolution::new(def)); } - fn definitions(&mut self) -> Option<&mut Definitions> { - Some(&mut self.definitions) + fn definitions(&mut self) -> &mut Definitions { + &mut self.definitions } } @@ -1128,20 +1178,29 @@ impl Named for hir::PathSegment { } impl<'a> Resolver<'a> { - pub fn new(session: &'a Session, make_glob_map: MakeGlobMap, arenas: &'a ResolverArenas<'a>) + pub fn new(session: &'a Session, + krate: &Crate, + make_glob_map: MakeGlobMap, + crate_loader: &'a mut CrateLoader, + arenas: &'a ResolverArenas<'a>) -> Resolver<'a> { - let root_def_id = DefId::local(CRATE_DEF_INDEX); - let graph_root = - ModuleS::new(NoParentLink, Some(Def::Mod(root_def_id)), false, arenas); - let graph_root = arenas.alloc_module(graph_root); + let root_def = Def::Mod(DefId::local(CRATE_DEF_INDEX)); + let graph_root = arenas.alloc_module(ModuleS { + normal_ancestor_id: Some(CRATE_NODE_ID), + no_implicit_prelude: attr::contains_name(&krate.attrs, "no_implicit_prelude"), + ..ModuleS::new(None, ModuleKind::Def(root_def, keywords::Invalid.name())) + }); let mut module_map = NodeMap(); module_map.insert(CRATE_NODE_ID, graph_root); + let mut expansion_data = FnvHashMap(); + expansion_data.insert(0, macros::ExpansionData::default()); // Crate root expansion + Resolver { session: session, definitions: Definitions::new(), - macros_at_scope: HashMap::new(), + macros_at_scope: FnvHashMap(), // The outermost module has def ID 0; this is not reflected in the // AST. 
@@ -1151,7 +1210,8 @@ impl<'a> Resolver<'a> { trait_item_map: FnvHashMap(), structs: FnvHashMap(), - unresolved_imports: 0, + determined_imports: Vec::new(), + indeterminate_imports: Vec::new(), current_module: graph_root, value_ribs: vec![Rib::new(ModuleRibKind(graph_root))], @@ -1174,13 +1234,26 @@ impl<'a> Resolver<'a> { make_glob_map: make_glob_map == MakeGlobMap::Yes, glob_map: NodeMap(), - used_imports: HashSet::new(), - used_crates: HashSet::new(), + used_imports: FnvHashSet(), + used_crates: FnvHashSet(), maybe_unused_trait_imports: NodeSet(), privacy_errors: Vec::new(), + ambiguity_errors: Vec::new(), arenas: arenas, + dummy_binding: arenas.alloc_name_binding(NameBinding { + kind: NameBindingKind::Def(Def::Err), + span: DUMMY_SP, + vis: ty::Visibility::Public, + }), + new_import_semantics: session.features.borrow().item_like_imports, + + exported_macros: Vec::new(), + derive_modes: FnvHashMap(), + crate_loader: crate_loader, + macro_names: FnvHashSet(), + expansion_data: expansion_data, } } @@ -1200,34 +1273,43 @@ impl<'a> Resolver<'a> { visit::walk_crate(self, krate); check_unused::check_crate(self, krate); - self.report_privacy_errors(); + self.report_errors(); + self.crate_loader.postprocess(krate); } - fn new_module(&self, parent_link: ParentLink<'a>, def: Option, external: bool) - -> Module<'a> { - self.arenas.alloc_module(ModuleS::new(parent_link, def, external, self.arenas)) - } - - fn new_extern_crate_module(&self, parent_link: ParentLink<'a>, def: Def, local_node_id: NodeId) - -> Module<'a> { - let mut module = ModuleS::new(parent_link, Some(def), false, self.arenas); - module.extern_crate_id = Some(local_node_id); - self.arenas.modules.alloc(module) + fn new_module(&self, parent: Module<'a>, kind: ModuleKind, local: bool) -> Module<'a> { + self.arenas.alloc_module(ModuleS { + normal_ancestor_id: if local { self.current_module.normal_ancestor_id } else { None }, + populated: Cell::new(local), + ..ModuleS::new(Some(parent), kind) + }) } fn get_ribs<'b>(&'b mut self, ns: Namespace) -> &'b mut Vec> { match ns { ValueNS => &mut self.value_ribs, TypeNS => &mut self.type_ribs } } - fn record_use(&mut self, name: Name, ns: Namespace, binding: &'a NameBinding<'a>) { + fn record_use(&mut self, name: Name, ns: Namespace, binding: &'a NameBinding<'a>, span: Span) + -> bool /* true if an error was reported */ { // track extern crates for unused_extern_crate lint - if let Some(DefId { krate, .. }) = binding.module().and_then(ModuleS::def_id) { + if let Some(DefId { krate, .. }) = binding.module().ok().and_then(ModuleS::def_id) { self.used_crates.insert(krate); } - if let NameBindingKind::Import { directive, .. } = binding.kind { - self.used_imports.insert((directive.id, ns)); - self.add_to_glob_map(directive.id, name); + match binding.kind { + NameBindingKind::Import { directive, binding, ref used } if !used.get() => { + used.set(true); + self.used_imports.insert((directive.id, ns)); + self.add_to_glob_map(directive.id, name); + self.record_use(name, ns, binding, span) + } + NameBindingKind::Import { .. 
} => false, + NameBindingKind::Ambiguity { b1, b2 } => { + let ambiguity_error = AmbiguityError { span: span, name: name, b1: b1, b2: b2 }; + self.ambiguity_errors.push(ambiguity_error); + true + } + _ => false } } @@ -1237,21 +1319,33 @@ impl<'a> Resolver<'a> { } } + fn expect_module(&mut self, name: Name, binding: &'a NameBinding<'a>, span: Option) + -> ResolveResult> { + match binding.module() { + Ok(module) => Success(module), + Err(true) => Failed(None), + Err(false) => { + let msg = format!("Not a module `{}`", name); + Failed(span.map(|span| (span, msg))) + } + } + } + /// Resolves the given module path from the given root `search_module`. fn resolve_module_path_from_root(&mut self, mut search_module: Module<'a>, module_path: &[Name], index: usize, - span: Span) + span: Option) -> ResolveResult> { - fn search_parent_externals(needle: Name, module: Module) -> Option { - match module.resolve_name(needle, TypeNS, false) { + fn search_parent_externals<'a>(this: &mut Resolver<'a>, needle: Name, module: Module<'a>) + -> Option> { + match this.resolve_name_in_module(module, needle, TypeNS, false, None) { Success(binding) if binding.is_extern_crate() => Some(module), - _ => match module.parent_link { - ModuleParentLink(ref parent, _) => { - search_parent_externals(needle, parent) - } - _ => None, + _ => if let (&ModuleKind::Def(..), Some(parent)) = (&module.kind, module.parent) { + search_parent_externals(this, needle, parent) + } else { + None }, } } @@ -1264,16 +1358,17 @@ impl<'a> Resolver<'a> { // modules as we go. while index < module_path_len { let name = module_path[index]; - match self.resolve_name_in_module(search_module, name, TypeNS, false, true) { - Failed(None) => { + match self.resolve_name_in_module(search_module, name, TypeNS, false, span) { + Failed(_) => { let segment_name = name.as_str(); let module_name = module_to_string(search_module); let msg = if "???" == &module_name { - match search_parent_externals(name, &self.current_module) { + let current_module = self.current_module; + match search_parent_externals(self, name, current_module) { Some(module) => { let path_str = names_to_string(module_path); let target_mod_str = module_to_string(&module); - let current_mod_str = module_to_string(&self.current_module); + let current_mod_str = module_to_string(current_module); let prefix = if target_mod_str == current_mod_str { "self::".to_string() @@ -1289,9 +1384,8 @@ impl<'a> Resolver<'a> { format!("Could not find `{}` in `{}`", segment_name, module_name) }; - return Failed(Some((span, msg))); + return Failed(span.map(|span| (span, msg))); } - Failed(err) => return Failed(err), Indeterminate => { debug!("(resolving module path for import) module resolution is \ indeterminate: {}", @@ -1301,12 +1395,9 @@ impl<'a> Resolver<'a> { Success(binding) => { // Check to see whether there are type bindings, and, if // so, whether there is a module within. 
- if let Some(module_def) = binding.module() { - self.check_privacy(name, binding, span); - search_module = module_def; - } else { - let msg = format!("Not a module `{}`", name); - return Failed(Some((span, msg))); + match self.expect_module(name, binding, span) { + Success(module) => search_module = module, + result @ _ => return result, } } } @@ -1322,7 +1413,7 @@ impl<'a> Resolver<'a> { fn resolve_module_path(&mut self, module_path: &[Name], use_lexical_scope: UseLexicalScopeFlag, - span: Span) + span: Option) -> ResolveResult> { if module_path.len() == 0 { return Success(self.graph_root) // Use the crate root @@ -1359,13 +1450,20 @@ impl<'a> Resolver<'a> { // first component of the path in the current lexical // scope and then proceed to resolve below that. let ident = ast::Ident::with_empty_ctxt(module_path[0]); - match self.resolve_ident_in_lexical_scope(ident, TypeNS, true) - .and_then(LexicalScopeBinding::module) { - None => return Failed(None), - Some(containing_module) => { - search_module = containing_module; - start_index = 1; + let lexical_binding = + self.resolve_ident_in_lexical_scope(ident, TypeNS, span); + if let Some(binding) = lexical_binding.and_then(LexicalScopeBinding::item) { + match self.expect_module(ident.name, binding, span) { + Success(containing_module) => { + search_module = containing_module; + start_index = 1; + } + result @ _ => return result, } + } else { + let msg = + format!("Use of undeclared type or module `{}`", ident.name); + return Failed(span.map(|span| (span, msg))); } } } @@ -1376,10 +1474,7 @@ impl<'a> Resolver<'a> { } } - self.resolve_module_path_from_root(search_module, - module_path, - start_index, - span) + self.resolve_module_path_from_root(search_module, module_path, start_index, span) } /// This resolves the identifier `ident` in the namespace `ns` in the current lexical scope. @@ -1402,7 +1497,7 @@ impl<'a> Resolver<'a> { fn resolve_ident_in_lexical_scope(&mut self, mut ident: ast::Ident, ns: Namespace, - record_used: bool) + record_used: Option) -> Option> { if ns == TypeNS { ident = ast::Ident::with_empty_ctxt(ident.name); @@ -1426,15 +1521,13 @@ impl<'a> Resolver<'a> { return Some(LexicalScopeBinding::Item(binding)); } - // We can only see through anonymous modules - if module.def.is_some() { - return match self.prelude { - Some(prelude) if !module.no_implicit_prelude.get() => { - prelude.resolve_name(name, ns, false).success() - .map(LexicalScopeBinding::Item) - } - _ => None, - }; + if let ModuleKind::Block(..) = module.kind { // We can see through blocks + } else if !module.no_implicit_prelude { + return self.prelude.and_then(|prelude| { + self.resolve_name_in_module(prelude, name, ns, false, None).success() + }).map(LexicalScopeBinding::Item) + } else { + return None; } } @@ -1451,39 +1544,10 @@ impl<'a> Resolver<'a> { None } - /// Returns the nearest normal module parent of the given module. - fn get_nearest_normal_module_parent(&self, mut module: Module<'a>) -> Option> { - loop { - match module.parent_link { - NoParentLink => return None, - ModuleParentLink(new_module, _) | - BlockParentLink(new_module, _) => { - let new_module = new_module; - if new_module.is_normal() { - return Some(new_module); - } - module = new_module; - } - } - } - } - - /// Returns the nearest normal module parent of the given module, or the - /// module itself if it is a normal module. 
- fn get_nearest_normal_module_parent_or_self(&self, module: Module<'a>) -> Module<'a> { - if module.is_normal() { - return module; - } - match self.get_nearest_normal_module_parent(module) { - None => module, - Some(new_module) => new_module, - } - } - /// Resolves a "module prefix". A module prefix is one or both of (a) `self::`; /// (b) some chain of `super::`. /// grammar: (SELF MOD_SEP ) ? (SUPER MOD_SEP) * - fn resolve_module_prefix(&mut self, module_path: &[Name], span: Span) + fn resolve_module_prefix(&mut self, module_path: &[Name], span: Option) -> ResolveResult> { // Start at the current module if we see `self` or `super`, or at the // top of the crate otherwise. @@ -1492,22 +1556,20 @@ impl<'a> Resolver<'a> { "super" => 0, _ => return Success(NoPrefixFound), }; + let mut containing_module = - self.get_nearest_normal_module_parent_or_self(self.current_module); + self.module_map[&self.current_module.normal_ancestor_id.unwrap()]; // Now loop through all the `super`s we find. while i < module_path.len() && "super" == module_path[i].as_str() { debug!("(resolving module prefix) resolving `super` at {}", module_to_string(&containing_module)); - match self.get_nearest_normal_module_parent(containing_module) { - None => { - let msg = "There are too many initial `super`s.".into(); - return Failed(Some((span, msg))); - } - Some(new_module) => { - containing_module = new_module; - i += 1; - } + if let Some(parent) = containing_module.parent { + containing_module = self.module_map[&parent.normal_ancestor_id.unwrap()]; + i += 1; + } else { + let msg = "There are too many initial `super`s.".into(); + return Failed(span.map(|span| (span, msg))); } } @@ -1517,27 +1579,6 @@ impl<'a> Resolver<'a> { return Success(PrefixFound(containing_module, i)); } - /// Attempts to resolve the supplied name in the given module for the - /// given namespace. If successful, returns the binding corresponding to - /// the name. - fn resolve_name_in_module(&mut self, - module: Module<'a>, - name: Name, - namespace: Namespace, - use_lexical_scope: bool, - record_used: bool) - -> ResolveResult<&'a NameBinding<'a>> { - debug!("(resolving name in module) resolving `{}` in `{}`", name, module_to_string(module)); - - self.populate_module_if_necessary(module); - module.resolve_name(name, namespace, use_lexical_scope).and_then(|binding| { - if record_used { - self.record_use(name, namespace, binding); - } - Success(binding) - }) - } - // AST resolution // // We maintain a list of value ribs and type ribs. @@ -1562,7 +1603,7 @@ impl<'a> Resolver<'a> { let module = self.module_map.get(&id).cloned(); // clones a reference if let Some(module) = module { // Move down in the graph. 
- let orig_module = ::std::mem::replace(&mut self.current_module, module); + let orig_module = replace(&mut self.current_module, module); self.value_ribs.push(Rib::new(ModuleRibKind(module))); self.type_ribs.push(Rib::new(ModuleRibKind(module))); @@ -1613,19 +1654,17 @@ impl<'a> Resolver<'a> { match item.node { ItemKind::Enum(_, ref generics) | ItemKind::Ty(_, ref generics) | - ItemKind::Struct(_, ref generics) => { - self.with_type_parameter_rib(HasTypeParameters(generics, TypeSpace, ItemRibKind), - |this| visit::walk_item(this, item)); - } - ItemKind::Fn(_, _, _, _, ref generics, _) => { - self.with_type_parameter_rib(HasTypeParameters(generics, FnSpace, ItemRibKind), + ItemKind::Struct(_, ref generics) | + ItemKind::Union(_, ref generics) | + ItemKind::Fn(.., ref generics, _) => { + self.with_type_parameter_rib(HasTypeParameters(generics, ItemRibKind), |this| visit::walk_item(this, item)); } ItemKind::DefaultImpl(_, ref trait_ref) => { self.with_optional_trait_ref(Some(trait_ref), |_, _| {}); } - ItemKind::Impl(_, _, ref generics, ref opt_trait_ref, ref self_type, ref impl_items) => + ItemKind::Impl(.., ref generics, ref opt_trait_ref, ref self_type, ref impl_items) => self.resolve_implementation(generics, opt_trait_ref, &self_type, @@ -1634,10 +1673,7 @@ impl<'a> Resolver<'a> { ItemKind::Trait(_, ref generics, ref bounds, ref trait_items) => { // Create a new rib for the trait-wide type parameters. - self.with_type_parameter_rib(HasTypeParameters(generics, - TypeSpace, - ItemRibKind), - |this| { + self.with_type_parameter_rib(HasTypeParameters(generics, ItemRibKind), |this| { let local_def_id = this.definitions.local_def_id(item.id); this.with_self_rib(Def::SelfTy(Some(local_def_id), None), |this| { this.visit_generics(generics); @@ -1660,7 +1696,6 @@ impl<'a> Resolver<'a> { TraitItemKind::Method(ref sig, _) => { let type_parameters = HasTypeParameters(&sig.generics, - FnSpace, MethodRibKind(!sig.decl.has_self())); this.with_type_parameter_rib(type_parameters, |this| { visit::walk_trait_item(this, trait_item) @@ -1699,7 +1734,7 @@ impl<'a> Resolver<'a> { &prefix.segments, TypeNS) { Ok(binding) => { - let def = binding.def().unwrap(); + let def = binding.def(); self.record_def(item.id, PathResolution::new(def)); } Err(true) => self.record_def(item.id, err_path_resolution()), @@ -1729,23 +1764,25 @@ impl<'a> Resolver<'a> { where F: FnOnce(&mut Resolver) { match type_parameters { - HasTypeParameters(generics, space, rib_kind) => { + HasTypeParameters(generics, rib_kind) => { let mut function_type_rib = Rib::new(rib_kind); - let mut seen_bindings = HashSet::new(); - for (index, type_parameter) in generics.ty_params.iter().enumerate() { + let mut seen_bindings = FnvHashMap(); + for type_parameter in &generics.ty_params { let name = type_parameter.ident.name; debug!("with_type_parameter_rib: {}", type_parameter.id); - if seen_bindings.contains(&name) { + if seen_bindings.contains_key(&name) { + let span = seen_bindings.get(&name).unwrap(); resolve_error(self, type_parameter.span, - ResolutionError::NameAlreadyUsedInTypeParameterList(name)); + ResolutionError::NameAlreadyUsedInTypeParameterList(name, + span)); } - seen_bindings.insert(name); + seen_bindings.entry(name).or_insert(type_parameter.span); // plain insert (no renaming) let def_id = self.definitions.local_def_id(type_parameter.id); - let def = Def::TyParam(space, index as u32, def_id, name); + let def = Def::TyParam(def_id); function_type_rib.bindings.insert(ast::Ident::with_empty_ctxt(name), def); self.record_def(type_parameter.id, 
PathResolution::new(def)); } @@ -1793,7 +1830,7 @@ impl<'a> Resolver<'a> { self.label_ribs.push(Rib::new(rib_kind)); // Add each argument to the rib. - let mut bindings_list = HashMap::new(); + let mut bindings_list = FnvHashMap(); for argument in &declaration.inputs { self.resolve_pattern(&argument.pat, PatternSource::FnParam, &mut bindings_list); @@ -1917,10 +1954,7 @@ impl<'a> Resolver<'a> { item_id: NodeId, impl_items: &[ImplItem]) { // If applicable, create a rib for the type parameters. - self.with_type_parameter_rib(HasTypeParameters(generics, - TypeSpace, - ItemRibKind), - |this| { + self.with_type_parameter_rib(HasTypeParameters(generics, ItemRibKind), |this| { // Resolve the type parameters. this.visit_generics(generics); @@ -1929,7 +1963,8 @@ impl<'a> Resolver<'a> { // Resolve the self type. this.visit_ty(self_type); - this.with_self_rib(Def::SelfTy(trait_id, Some(item_id)), |this| { + let item_def_id = this.definitions.local_def_id(item_id); + this.with_self_rib(Def::SelfTy(trait_id, Some(item_def_id)), |this| { this.with_current_self_type(self_type, |this| { for impl_item in impl_items { this.resolve_visibility(&impl_item.vis); @@ -1953,7 +1988,6 @@ impl<'a> Resolver<'a> { // specific type parameters. let type_parameters = HasTypeParameters(&sig.generics, - FnSpace, MethodRibKind(!sig.decl.has_self())); this.with_type_parameter_rib(type_parameters, |this| { visit::walk_impl_item(this, impl_item); @@ -1998,7 +2032,7 @@ impl<'a> Resolver<'a> { walk_list!(self, visit_expr, &local.init); // Resolve the pattern. - self.resolve_pattern(&local.pat, PatternSource::Let, &mut HashMap::new()); + self.resolve_pattern(&local.pat, PatternSource::Let, &mut FnvHashMap()); } // build a map from pattern identifiers to binding-info's. @@ -2006,7 +2040,7 @@ impl<'a> Resolver<'a> { // that expands into an or-pattern where one 'x' was from the // user and one 'x' came from the macro. fn binding_mode_map(&mut self, pat: &Pat) -> BindingMap { - let mut binding_map = HashMap::new(); + let mut binding_map = FnvHashMap(); pat.walk(&mut |pat| { if let PatKind::Ident(binding_mode, ident, ref sub_pat) = pat.node { @@ -2044,8 +2078,10 @@ impl<'a> Resolver<'a> { if binding_0.binding_mode != binding_i.binding_mode { resolve_error(self, binding_i.span, - ResolutionError::VariableBoundWithDifferentMode(key.name, - i + 1)); + ResolutionError::VariableBoundWithDifferentMode( + key.name, + i + 1, + binding_0.span)); } } } @@ -2064,7 +2100,7 @@ impl<'a> Resolver<'a> { fn resolve_arm(&mut self, arm: &Arm) { self.value_ribs.push(Rib::new(NormalRibKind)); - let mut bindings_list = HashMap::new(); + let mut bindings_list = FnvHashMap(); for pattern in &arm.pats { self.resolve_pattern(&pattern, PatternSource::Match, &mut bindings_list); } @@ -2174,6 +2210,7 @@ impl<'a> Resolver<'a> { Def::Trait(_) | Def::Enum(_) | Def::Struct(_) | + Def::Union(_) | Def::TyAlias(_) => true, _ => false, }, @@ -2200,18 +2237,18 @@ impl<'a> Resolver<'a> { } fn fresh_binding(&mut self, - ident: &ast::SpannedIdent, + ident: &SpannedIdent, pat_id: NodeId, outer_pat_id: NodeId, pat_src: PatternSource, - bindings: &mut HashMap) + bindings: &mut FnvHashMap) -> PathResolution { // Add the binding to the local ribs, if it // doesn't already exist in the bindings map. (We // must not add it if it's in the bindings map // because that breaks the assumptions later // passes make about or-patterns.) 
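(Aside: a minimal sketch of the or-pattern invariant that the comment above refers to; the function below is illustrative only and not taken from this patch.)

fn classify(r: Result<u32, u32>) -> u32 {
    match r {
        // Both alternatives bind the same name `n`. The resolver must hand out a
        // single binding for the whole arm, which is what the `bindings` map keyed
        // by the outermost pattern id guarantees; creating a second, fresh `n`
        // here would break the assumptions later passes make about or-patterns.
        Ok(n) | Err(n) => n,
    }
}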
- let mut def = Def::Local(self.definitions.local_def_id(pat_id), pat_id); + let mut def = Def::Local(self.definitions.local_def_id(pat_id)); match bindings.get(&ident.node).cloned() { Some(id) if id == outer_pat_id => { // `Variant(a, a)`, error @@ -2305,7 +2342,7 @@ impl<'a> Resolver<'a> { pat_src: PatternSource, // Maps idents to the node ID for the // outermost pattern that binds them. - bindings: &mut HashMap) { + bindings: &mut FnvHashMap) { // Visit all direct subpatterns of this pattern. let outer_pat_id = pat.id; pat.walk(&mut |pat| { @@ -2313,16 +2350,17 @@ impl<'a> Resolver<'a> { PatKind::Ident(bmode, ref ident, ref opt_pat) => { // First try to resolve the identifier as some existing // entity, then fall back to a fresh binding. - let binding = self.resolve_ident_in_lexical_scope(ident.node, ValueNS, false) + let binding = self.resolve_ident_in_lexical_scope(ident.node, ValueNS, None) .and_then(LexicalScopeBinding::item); - let resolution = binding.and_then(NameBinding::def).and_then(|def| { + let resolution = binding.map(NameBinding::def).and_then(|def| { let always_binding = !pat_src.is_refutable() || opt_pat.is_some() || bmode != BindingMode::ByValue(Mutability::Immutable); match def { Def::Struct(..) | Def::Variant(..) | Def::Const(..) | Def::AssociatedConst(..) if !always_binding => { // A constant, unit variant, etc pattern. - self.record_use(ident.node.name, ValueNS, binding.unwrap()); + let name = ident.node.name; + self.record_use(name, ValueNS, binding.unwrap(), ident.span); Some(PathResolution::new(def)) } Def::Struct(..) | Def::Variant(..) | @@ -2353,7 +2391,7 @@ impl<'a> Resolver<'a> { self.record_def(pat.id, resolution); } - PatKind::TupleStruct(ref path, _, _) => { + PatKind::TupleStruct(ref path, ..) => { self.resolve_pattern_path(pat.id, None, path, ValueNS, |def| { match def { Def::Struct(..) | Def::Variant(..) => true, @@ -2372,10 +2410,10 @@ impl<'a> Resolver<'a> { }, "variant, struct or constant"); } - PatKind::Struct(ref path, _, _) => { + PatKind::Struct(ref path, ..) => { self.resolve_pattern_path(pat.id, None, path, TypeNS, |def| { match def { - Def::Struct(..) | Def::Variant(..) | + Def::Struct(..) | Def::Union(..) | Def::Variant(..) | Def::TyAlias(..) | Def::AssociatedTy(..) => true, _ => false, } @@ -2449,7 +2487,7 @@ impl<'a> Resolver<'a> { if path.global { let binding = self.resolve_crate_relative_path(span, segments, namespace); - return binding.map(|binding| mk_res(binding.def().unwrap())); + return binding.map(|binding| mk_res(binding.def())); } // Try to find a path to an item in a module. @@ -2480,14 +2518,14 @@ impl<'a> Resolver<'a> { // // Such behavior is required for backward compatibility. // The same fallback is used when `a` resolves to nothing. 
- let def = resolve_identifier_with_fallback(self, true).ok_or(false); + let def = resolve_identifier_with_fallback(self, Some(span)).ok_or(false); return def.and_then(|def| self.adjust_local_def(def, span).ok_or(true)).map(mk_res); } - let unqualified_def = resolve_identifier_with_fallback(self, false); + let unqualified_def = resolve_identifier_with_fallback(self, None); let qualified_binding = self.resolve_module_relative_path(span, segments, namespace); match (qualified_binding, unqualified_def) { - (Ok(binding), Some(ref ud)) if binding.def().unwrap() == ud.def => { + (Ok(binding), Some(ref ud)) if binding.def() == ud.def => { self.session .add_lint(lint::builtin::UNUSED_QUALIFICATIONS, id, @@ -2497,14 +2535,14 @@ impl<'a> Resolver<'a> { _ => {} } - qualified_binding.map(|binding| mk_res(binding.def().unwrap())) + qualified_binding.map(|binding| mk_res(binding.def())) } // Resolve a single identifier fn resolve_identifier(&mut self, identifier: ast::Ident, namespace: Namespace, - record_used: bool) + record_used: Option) -> Option { if identifier.name == keywords::Invalid.name() { return None; @@ -2526,7 +2564,7 @@ impl<'a> Resolver<'a> { Def::Upvar(..) => { span_bug!(span, "unexpected {:?} in bindings", def) } - Def::Local(_, node_id) => { + Def::Local(def_id) => { for rib in ribs { match rib.kind { NormalRibKind | ModuleRibKind(..) | MacroDefinition(..) => { @@ -2534,13 +2572,13 @@ impl<'a> Resolver<'a> { } ClosureRibKind(function_id) => { let prev_def = def; - let node_def_id = self.definitions.local_def_id(node_id); + let node_id = self.definitions.as_local_node_id(def_id).unwrap(); let seen = self.freevars_seen .entry(function_id) .or_insert_with(|| NodeMap()); if let Some(&index) = seen.get(&node_id) { - def = Def::Upvar(node_def_id, node_id, index, function_id); + def = Def::Upvar(def_id, index, function_id); continue; } let vec = self.freevars @@ -2552,7 +2590,7 @@ impl<'a> Resolver<'a> { span: span, }); - def = Def::Upvar(node_def_id, node_id, depth, function_id); + def = Def::Upvar(def_id, depth, function_id); seen.insert(node_id, depth); } ItemRibKind | MethodRibKind(_) => { @@ -2618,18 +2656,11 @@ impl<'a> Resolver<'a> { .collect::>(); let containing_module; - match self.resolve_module_path(&module_path, UseLexicalScope, span) { + match self.resolve_module_path(&module_path, UseLexicalScope, Some(span)) { Failed(err) => { - let (span, msg) = match err { - Some((span, msg)) => (span, msg), - None => { - let msg = format!("Use of undeclared type or module `{}`", - names_to_string(&module_path)); - (span, msg) - } - }; - - resolve_error(self, span, ResolutionError::FailedToResolve(&msg)); + if let Some((span, msg)) = err { + resolve_error(self, span, ResolutionError::FailedToResolve(&msg)); + } return Err(true); } Indeterminate => return Err(false), @@ -2639,11 +2670,9 @@ impl<'a> Resolver<'a> { } let name = segments.last().unwrap().identifier.name; - let result = self.resolve_name_in_module(containing_module, name, namespace, false, true); - result.success().map(|binding| { - self.check_privacy(name, binding, span); - binding - }).ok_or(false) + let result = + self.resolve_name_in_module(containing_module, name, namespace, false, Some(span)); + result.success().ok_or(false) } /// Invariant: This must be called only during main resolution, not during @@ -2657,21 +2686,11 @@ impl<'a> Resolver<'a> { let root_module = self.graph_root; let containing_module; - match self.resolve_module_path_from_root(root_module, - &module_path, - 0, - span) { + match 
self.resolve_module_path_from_root(root_module, &module_path, 0, Some(span)) { Failed(err) => { - let (span, msg) = match err { - Some((span, msg)) => (span, msg), - None => { - let msg = format!("Use of undeclared module `::{}`", - names_to_string(&module_path)); - (span, msg) - } - }; - - resolve_error(self, span, ResolutionError::FailedToResolve(&msg)); + if let Some((span, msg)) = err { + resolve_error(self, span, ResolutionError::FailedToResolve(&msg)); + } return Err(true); } @@ -2683,11 +2702,9 @@ impl<'a> Resolver<'a> { } let name = segments.last().unwrap().name(); - let result = self.resolve_name_in_module(containing_module, name, namespace, false, true); - result.success().map(|binding| { - self.check_privacy(name, binding, span); - binding - }).ok_or(false) + let result = + self.resolve_name_in_module(containing_module, name, namespace, false, Some(span)); + result.success().ok_or(false) } fn with_no_errors(&mut self, f: F) -> T @@ -2715,7 +2732,6 @@ impl<'a> Resolver<'a> { fn with_empty_ribs(&mut self, f: F) -> T where F: FnOnce(&mut Resolver<'a>) -> T, { - use ::std::mem::replace; let value_ribs = replace(&mut self.value_ribs, Vec::new()); let type_ribs = replace(&mut self.type_ribs, Vec::new()); let label_ribs = replace(&mut self.label_ribs, Vec::new()); @@ -2743,8 +2759,8 @@ impl<'a> Resolver<'a> { // Look for a field with the same name in the current self_type. if let Some(resolution) = self.def_map.get(&node_id) { match resolution.base_def { - Def::Enum(did) | Def::TyAlias(did) | - Def::Struct(did) | Def::Variant(_, did) if resolution.depth == 0 => { + Def::Enum(did) | Def::TyAlias(did) | Def::Union(did) | + Def::Struct(did) | Def::Variant(did) if resolution.depth == 0 => { if let Some(fields) = self.structs.get(&did) { if fields.iter().any(|&field_name| name == field_name) { return Field; @@ -2771,8 +2787,7 @@ impl<'a> Resolver<'a> { } fn find_best_match(&mut self, name: &str) -> SuggestionType { - if let Some(macro_name) = self.session.available_macros - .borrow().iter().find(|n| n.as_str() == name) { + if let Some(macro_name) = self.macro_names.iter().find(|n| n.as_str() == name) { return SuggestionType::Macro(format!("{}!", macro_name)); } @@ -2788,11 +2803,11 @@ impl<'a> Resolver<'a> { } SuggestionType::NotFound } - fn resolve_labeled_block(&mut self, label: Option, id: NodeId, block: &Block) { + fn resolve_labeled_block(&mut self, label: Option, id: NodeId, block: &Block) { if let Some(label) = label { let def = Def::Label(id); self.with_label_rib(|this| { - this.label_ribs.last_mut().unwrap().bindings.insert(label, def); + this.label_ribs.last_mut().unwrap().bindings.insert(label.node, def); this.visit_block(block); }); } else { @@ -2814,7 +2829,7 @@ impl<'a> Resolver<'a> { if let Some(path_res) = self.resolve_possibly_assoc_item(expr.id, maybe_qself.as_ref(), path, ValueNS) { // Check if struct variant - let is_struct_variant = if let Def::Variant(_, variant_id) = path_res.base_def { + let is_struct_variant = if let Def::Variant(variant_id) = path_res.base_def { self.structs.contains_key(&variant_id) } else { false @@ -2931,7 +2946,7 @@ impl<'a> Resolver<'a> { let mut context = UnresolvedNameContext::Other; let mut def = Def::Err; if !msg.is_empty() { - msg = format!(". 
Did you mean {}?", msg); + msg = format!("did you mean {}?", msg); } else { // we display a help message if this is a module let name_path = path.segments.iter() @@ -2940,9 +2955,9 @@ impl<'a> Resolver<'a> { match self.resolve_module_path(&name_path[..], UseLexicalScope, - expr.span) { + Some(expr.span)) { Success(e) => { - if let Some(def_type) = e.def { + if let Some(def_type) = e.def() { def = def_type; } context = UnresolvedNameContext::PathIsMod(parent); @@ -2969,7 +2984,7 @@ impl<'a> Resolver<'a> { visit::walk_expr(self, expr); } - ExprKind::Struct(ref path, _, _) => { + ExprKind::Struct(ref path, ..) => { // Resolve the path to the structure it goes to. We don't // check to ensure that the path is actually a structure; that // is checked later during typeck. @@ -2991,19 +3006,6 @@ impl<'a> Resolver<'a> { visit::walk_expr(self, expr); } - ExprKind::Loop(_, Some(label)) | ExprKind::While(_, _, Some(label)) => { - self.with_label_rib(|this| { - let def = Def::Label(expr.id); - - { - let rib = this.label_ribs.last_mut().unwrap(); - rib.bindings.insert(label.node, def); - } - - visit::walk_expr(this, expr); - }) - } - ExprKind::Break(Some(label)) | ExprKind::Continue(Some(label)) => { match self.search_label(label.node) { None => { @@ -3026,19 +3028,26 @@ impl<'a> Resolver<'a> { self.visit_expr(subexpression); self.value_ribs.push(Rib::new(NormalRibKind)); - self.resolve_pattern(pattern, PatternSource::IfLet, &mut HashMap::new()); + self.resolve_pattern(pattern, PatternSource::IfLet, &mut FnvHashMap()); self.visit_block(if_block); self.value_ribs.pop(); optional_else.as_ref().map(|expr| self.visit_expr(expr)); } + ExprKind::Loop(ref block, label) => self.resolve_labeled_block(label, expr.id, &block), + + ExprKind::While(ref subexpression, ref block, label) => { + self.visit_expr(subexpression); + self.resolve_labeled_block(label, expr.id, &block); + } + ExprKind::WhileLet(ref pattern, ref subexpression, ref block, label) => { self.visit_expr(subexpression); self.value_ribs.push(Rib::new(NormalRibKind)); - self.resolve_pattern(pattern, PatternSource::WhileLet, &mut HashMap::new()); + self.resolve_pattern(pattern, PatternSource::WhileLet, &mut FnvHashMap()); - self.resolve_labeled_block(label.map(|l| l.node), expr.id, block); + self.resolve_labeled_block(label, expr.id, block); self.value_ribs.pop(); } @@ -3046,9 +3055,9 @@ impl<'a> Resolver<'a> { ExprKind::ForLoop(ref pattern, ref subexpression, ref block, label) => { self.visit_expr(subexpression); self.value_ribs.push(Rib::new(NormalRibKind)); - self.resolve_pattern(pattern, PatternSource::For, &mut HashMap::new()); + self.resolve_pattern(pattern, PatternSource::For, &mut FnvHashMap()); - self.resolve_labeled_block(label.map(|l| l.node), expr.id, block); + self.resolve_labeled_block(label, expr.id, block); self.value_ribs.pop(); } @@ -3083,7 +3092,7 @@ impl<'a> Resolver<'a> { let traits = self.get_traits_containing_item(name.node.name); self.trait_map.insert(expr.id, traits); } - ExprKind::MethodCall(name, _, _) => { + ExprKind::MethodCall(name, ..) 
=> { debug!("(recording candidate traits for expr) recording traits for {}", expr.id); let traits = self.get_traits_containing_item(name.node.name); @@ -3128,7 +3137,7 @@ impl<'a> Resolver<'a> { let mut collected_traits = Vec::new(); module.for_each_child(|name, ns, binding| { if ns != TypeNS { return } - if let Some(Def::Trait(_)) = binding.def() { + if let Def::Trait(_) = binding.def() { collected_traits.push((name, binding)); } }); @@ -3136,7 +3145,7 @@ impl<'a> Resolver<'a> { } for &(trait_name, binding) in traits.as_ref().unwrap().iter() { - let trait_def_id = binding.def().unwrap().def_id(); + let trait_def_id = binding.def().def_id(); if this.trait_item_map.contains_key(&(name, trait_def_id)) { let mut import_id = None; if let NameBindingKind::Import { directive, .. } = binding.kind { @@ -3151,16 +3160,13 @@ impl<'a> Resolver<'a> { }; search_in_module(self, search_module); - match search_module.parent_link { - NoParentLink | ModuleParentLink(..) => { - if !search_module.no_implicit_prelude.get() { - self.prelude.map(|prelude| search_in_module(self, prelude)); - } - break; - } - BlockParentLink(parent_module, _) => { - search_module = parent_module; + if let ModuleKind::Block(..) = search_module.kind { + search_module = search_module.parent.unwrap(); + } else { + if !search_module.no_implicit_prelude { + self.prelude.map(|prelude| search_in_module(self, prelude)); } + break; } } @@ -3195,8 +3201,8 @@ impl<'a> Resolver<'a> { if name_binding.is_import() { return; } // collect results based on the filter function - if let Some(def) = name_binding.def() { - if name == lookup_name && ns == namespace && filter_fn(def) { + if name == lookup_name && ns == namespace { + if filter_fn(name_binding.def()) { // create the path let ident = ast::Ident::with_empty_ctxt(name); let params = PathParameters::none(); @@ -3226,11 +3232,11 @@ impl<'a> Resolver<'a> { } // collect submodules to explore - if let Some(module) = name_binding.module() { + if let Ok(module) = name_binding.module() { // form the path - let path_segments = match module.parent_link { - NoParentLink => path_segments.clone(), - ModuleParentLink(_, name) => { + let path_segments = match module.kind { + _ if module.parent.is_none() => path_segments.clone(), + ModuleKind::Def(_, name) => { let mut paths = path_segments.clone(); let ident = ast::Ident::with_empty_ctxt(name); let params = PathParameters::none(); @@ -3247,7 +3253,7 @@ impl<'a> Resolver<'a> { if !in_module_is_extern || name_binding.vis == ty::Visibility::Public { // add the module to the lookup let is_extern = in_module_is_extern || name_binding.is_extern_crate(); - if !worklist.iter().any(|&(m, _, _)| m.def == module.def) { + if !worklist.iter().any(|&(m, ..)| m.def() == module.def()) { worklist.push((module, path_segments, is_extern)); } } @@ -3274,28 +3280,22 @@ impl<'a> Resolver<'a> { ast::Visibility::Crate(_) => return ty::Visibility::Restricted(ast::CRATE_NODE_ID), ast::Visibility::Restricted { ref path, id } => (path, id), ast::Visibility::Inherited => { - let current_module = - self.get_nearest_normal_module_parent_or_self(self.current_module); - let id = - self.definitions.as_local_node_id(current_module.def_id().unwrap()).unwrap(); - return ty::Visibility::Restricted(id); + return ty::Visibility::Restricted(self.current_module.normal_ancestor_id.unwrap()); } }; let segments: Vec<_> = path.segments.iter().map(|seg| seg.identifier.name).collect(); let mut path_resolution = err_path_resolution(); - let vis = match self.resolve_module_path(&segments, 
DontUseLexicalScope, path.span) { + let vis = match self.resolve_module_path(&segments, DontUseLexicalScope, Some(path.span)) { Success(module) => { - let def = module.def.unwrap(); - path_resolution = PathResolution::new(def); - ty::Visibility::Restricted(self.definitions.as_local_node_id(def.def_id()).unwrap()) + path_resolution = PathResolution::new(module.def().unwrap()); + ty::Visibility::Restricted(module.normal_ancestor_id.unwrap()) } - Failed(Some((span, msg))) => { - self.session.span_err(span, &format!("failed to resolve module path. {}", msg)); - ty::Visibility::Public - } - _ => { - self.session.span_err(path.span, "unresolved module path"); + Indeterminate => unreachable!(), + Failed(err) => { + if let Some((span, msg)) = err { + self.session.span_err(span, &format!("failed to resolve module path. {}", msg)); + } ty::Visibility::Public } }; @@ -3308,20 +3308,27 @@ impl<'a> Resolver<'a> { } fn is_accessible(&self, vis: ty::Visibility) -> bool { - let current_module = self.get_nearest_normal_module_parent_or_self(self.current_module); - let node_id = self.definitions.as_local_node_id(current_module.def_id().unwrap()).unwrap(); - vis.is_accessible_from(node_id, self) + vis.is_accessible_from(self.current_module.normal_ancestor_id.unwrap(), self) } - fn check_privacy(&mut self, name: Name, binding: &'a NameBinding<'a>, span: Span) { - if !self.is_accessible(binding.vis) { - self.privacy_errors.push(PrivacyError(span, name, binding)); - } + fn is_accessible_from(&self, vis: ty::Visibility, module: Module<'a>) -> bool { + vis.is_accessible_from(module.normal_ancestor_id.unwrap(), self) } - fn report_privacy_errors(&self) { - if self.privacy_errors.len() == 0 { return } - let mut reported_spans = HashSet::new(); + fn report_errors(&self) { + let mut reported_spans = FnvHashSet(); + + for &AmbiguityError { span, name, b1, b2 } in &self.ambiguity_errors { + if !reported_spans.insert(span) { continue } + let msg1 = format!("`{}` could resolve to the name imported here", name); + let msg2 = format!("`{}` could also resolve to the name imported here", name); + self.session.struct_span_err(span, &format!("`{}` is ambiguous", name)) + .span_note(b1.span, &msg1) + .span_note(b2.span, &msg2) + .note(&format!("Consider adding an explicit import of `{}` to disambiguate", name)) + .emit(); + } + for &PrivacyError(span, name, binding) in &self.privacy_errors { if !reported_spans.insert(span) { continue } if binding.is_extern_crate() { @@ -3330,7 +3337,7 @@ impl<'a> Resolver<'a> { let msg = format!("extern crate `{}` is private", name); self.session.add_lint(lint::builtin::INACCESSIBLE_EXTERN_CRATE, node_id, span, msg); } else { - let def = binding.def().unwrap(); + let def = binding.def(); self.session.span_err(span, &format!("{} `{}` is private", def.kind_name(), name)); } } @@ -3347,10 +3354,10 @@ impl<'a> Resolver<'a> { return self.report_conflict(parent, name, ns, old_binding, binding); } - let container = match parent.def { - Some(Def::Mod(_)) => "module", - Some(Def::Trait(_)) => "trait", - None => "block", + let container = match parent.kind { + ModuleKind::Def(Def::Mod(_), _) => "module", + ModuleKind::Def(Def::Trait(_), _) => "trait", + ModuleKind::Block(..) 
=> "block", _ => "enum", }; @@ -3363,9 +3370,9 @@ impl<'a> Resolver<'a> { let msg = { let kind = match (ns, old_binding.module()) { (ValueNS, _) => "a value", - (TypeNS, Some(module)) if module.extern_crate_id.is_some() => "an extern crate", - (TypeNS, Some(module)) if module.is_normal() => "a module", - (TypeNS, Some(module)) if module.is_trait() => "a trait", + (TypeNS, Ok(module)) if module.extern_crate_id.is_some() => "an extern crate", + (TypeNS, Ok(module)) if module.is_normal() => "a module", + (TypeNS, Ok(module)) if module.is_trait() => "a trait", (TypeNS, _) => "a type", }; format!("{} named `{}` has already been {} in this {}", @@ -3373,15 +3380,27 @@ impl<'a> Resolver<'a> { }; let mut err = match (old_binding.is_extern_crate(), binding.is_extern_crate()) { - (true, true) => struct_span_err!(self.session, span, E0259, "{}", msg), + (true, true) => { + let mut e = struct_span_err!(self.session, span, E0259, "{}", msg); + e.span_label(span, &format!("`{}` was already imported", name)); + e + }, (true, _) | (_, true) if binding.is_import() || old_binding.is_import() => { let mut e = struct_span_err!(self.session, span, E0254, "{}", msg); e.span_label(span, &"already imported"); e }, - (true, _) | (_, true) => struct_span_err!(self.session, span, E0260, "{}", msg), + (true, _) | (_, true) => { + let mut e = struct_span_err!(self.session, span, E0260, "{}", msg); + e.span_label(span, &format!("`{}` already imported", name)); + e + }, _ => match (old_binding.is_import(), binding.is_import()) { - (false, false) => struct_span_err!(self.session, span, E0428, "{}", msg), + (false, false) => { + let mut e = struct_span_err!(self.session, span, E0428, "{}", msg); + e.span_label(span, &format!("already defined")); + e + }, (true, true) => { let mut e = struct_span_err!(self.session, span, E0252, "{}", msg); e.span_label(span, &format!("already imported")); @@ -3485,17 +3504,15 @@ fn module_to_string(module: Module) -> String { let mut names = Vec::new(); fn collect_mod(names: &mut Vec, module: Module) { - match module.parent_link { - NoParentLink => {} - ModuleParentLink(ref module, name) => { + if let ModuleKind::Def(_, name) = module.kind { + if let Some(parent) = module.parent { names.push(name); - collect_mod(names, module); - } - BlockParentLink(ref module, _) => { - // danger, shouldn't be ident? - names.push(token::intern("")); - collect_mod(names, module); + collect_mod(names, parent); } + } else { + // danger, shouldn't be ident? + names.push(token::intern("")); + collect_mod(names, module); } } collect_mod(&mut names, module); diff --git a/src/librustc_resolve/macros.rs b/src/librustc_resolve/macros.rs new file mode 100644 index 0000000000..3a9fb84519 --- /dev/null +++ b/src/librustc_resolve/macros.rs @@ -0,0 +1,262 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
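(Aside: the new module below tracks macro scopes per module via `ModuleData { parent, macros, macros_escape }`. A small sketch of the surface behaviour being modelled, using made-up macro names; `macros_escape` corresponds to `#[macro_use]` on a module, which lets the macros defined inside leak into the parent scope.)

mod outer {
    macro_rules! first { () => { 1 } }        // in scope from here to the end of `outer`

    #[macro_use]                               // modelled as `macros_escape: true`
    mod inner {
        macro_rules! second { () => { 2 } }    // leaks into `outer` after `mod inner`
    }

    fn f() -> u32 { first!() + second!() }     // both macros are visible here
}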
+ +use Resolver; +use rustc::middle::cstore::LoadedMacro; +use rustc::util::nodemap::FnvHashMap; +use std::cell::RefCell; +use std::mem; +use std::rc::Rc; +use syntax::ast::{self, Name}; +use syntax::errors::DiagnosticBuilder; +use syntax::ext::base::{self, MultiModifier, MultiDecorator, MultiItemModifier}; +use syntax::ext::base::{NormalTT, Resolver as SyntaxResolver, SyntaxExtension}; +use syntax::ext::expand::{Expansion, Invocation, InvocationKind}; +use syntax::ext::hygiene::Mark; +use syntax::ext::tt::macro_rules; +use syntax::feature_gate::{self, emit_feature_err}; +use syntax::parse::token::{self, intern}; +use syntax::util::lev_distance::find_best_match_for_name; +use syntax::visit::{self, Visitor}; +use syntax_pos::Span; + +#[derive(Clone, Default)] +pub struct ExpansionData { + module: Rc, +} + +// FIXME(jseyfried): merge with `::ModuleS`. +#[derive(Default)] +struct ModuleData { + parent: Option>, + macros: RefCell>>, + macros_escape: bool, +} + +impl<'a> base::Resolver for Resolver<'a> { + fn next_node_id(&mut self) -> ast::NodeId { + self.session.next_node_id() + } + + fn visit_expansion(&mut self, mark: Mark, expansion: &Expansion) { + expansion.visit_with(&mut ExpansionVisitor { + current_module: self.expansion_data[&mark.as_u32()].module.clone(), + resolver: self, + }); + } + + fn add_macro(&mut self, scope: Mark, mut def: ast::MacroDef) { + if &def.ident.name.as_str() == "macro_rules" { + self.session.span_err(def.span, "user-defined macros may not be named `macro_rules`"); + } + if def.use_locally { + let ext = macro_rules::compile(&self.session.parse_sess, &def); + self.add_ext(scope, def.ident, Rc::new(ext)); + } + if def.export { + def.id = self.next_node_id(); + self.exported_macros.push(def); + } + } + + fn add_ext(&mut self, scope: Mark, ident: ast::Ident, ext: Rc) { + if let NormalTT(..) = *ext { + self.macro_names.insert(ident.name); + } + + let mut module = self.expansion_data[&scope.as_u32()].module.clone(); + while module.macros_escape { + module = module.parent.clone().unwrap(); + } + module.macros.borrow_mut().insert(ident.name, ext); + } + + fn add_expansions_at_stmt(&mut self, id: ast::NodeId, macros: Vec) { + self.macros_at_scope.insert(id, macros); + } + + fn find_attr_invoc(&mut self, attrs: &mut Vec) -> Option { + for i in 0..attrs.len() { + let name = intern(&attrs[i].name()); + match self.expansion_data[&0].module.macros.borrow().get(&name) { + Some(ext) => match **ext { + MultiModifier(..) | MultiDecorator(..) | SyntaxExtension::AttrProcMacro(..) => { + return Some(attrs.remove(i)) + } + _ => {} + }, + None => {} + } + } + None + } + + fn resolve_invoc(&mut self, scope: Mark, invoc: &Invocation) -> Option> { + let (name, span) = match invoc.kind { + InvocationKind::Bang { ref mac, .. } => { + let path = &mac.node.path; + if path.segments.len() > 1 || path.global || + !path.segments[0].parameters.is_empty() { + self.session.span_err(path.span, + "expected macro name without module separators"); + return None; + } + (path.segments[0].identifier.name, path.span) + } + InvocationKind::Attr { ref attr, .. 
} => (intern(&*attr.name()), attr.span), + }; + + let mut module = self.expansion_data[&scope.as_u32()].module.clone(); + loop { + if let Some(ext) = module.macros.borrow().get(&name) { + return Some(ext.clone()); + } + match module.parent.clone() { + Some(parent) => module = parent, + None => break, + } + } + + let mut err = + self.session.struct_span_err(span, &format!("macro undefined: '{}!'", name)); + self.suggest_macro_name(&name.as_str(), &mut err); + err.emit(); + None + } + + fn resolve_derive_mode(&mut self, ident: ast::Ident) -> Option> { + self.derive_modes.get(&ident.name).cloned() + } +} + +impl<'a> Resolver<'a> { + fn suggest_macro_name(&mut self, name: &str, err: &mut DiagnosticBuilder<'a>) { + if let Some(suggestion) = find_best_match_for_name(self.macro_names.iter(), name, None) { + if suggestion != name { + err.help(&format!("did you mean `{}!`?", suggestion)); + } else { + err.help(&format!("have you added the `#[macro_use]` on the module/import?")); + } + } + } + + fn insert_custom_derive(&mut self, name: &str, ext: Rc, sp: Span) { + if !self.session.features.borrow().rustc_macro { + let sess = &self.session.parse_sess; + let msg = "loading custom derive macro crates is experimentally supported"; + emit_feature_err(sess, "rustc_macro", sp, feature_gate::GateIssue::Language, msg); + } + if self.derive_modes.insert(token::intern(name), ext).is_some() { + self.session.span_err(sp, &format!("cannot shadow existing derive mode `{}`", name)); + } + } +} + +struct ExpansionVisitor<'b, 'a: 'b> { + resolver: &'b mut Resolver<'a>, + current_module: Rc, +} + +impl<'a, 'b> ExpansionVisitor<'a, 'b> { + fn visit_invoc(&mut self, id: ast::NodeId) { + self.resolver.expansion_data.insert(id.as_u32(), ExpansionData { + module: self.current_module.clone(), + }); + } + + // does this attribute list contain "macro_use"? + fn contains_macro_use(&mut self, attrs: &[ast::Attribute]) -> bool { + for attr in attrs { + if attr.check_name("macro_escape") { + let msg = "macro_escape is a deprecated synonym for macro_use"; + let mut err = self.resolver.session.struct_span_warn(attr.span, msg); + if let ast::AttrStyle::Inner = attr.node.style { + err.help("consider an outer attribute, #[macro_use] mod ...").emit(); + } else { + err.emit(); + } + } else if !attr.check_name("macro_use") { + continue; + } + + if !attr.is_word() { + self.resolver.session.span_err(attr.span, + "arguments to macro_use are not allowed here"); + } + return true; + } + + false + } +} + +macro_rules! method { + ($visit:ident: $ty:ty, $invoc:path, $walk:ident) => { + fn $visit(&mut self, node: &$ty) { + match node.node { + $invoc(..) => self.visit_invoc(node.id), + _ => visit::$walk(self, node), + } + } + } +} + +impl<'a, 'b> Visitor for ExpansionVisitor<'a, 'b> { + method!(visit_trait_item: ast::TraitItem, ast::TraitItemKind::Macro, walk_trait_item); + method!(visit_impl_item: ast::ImplItem, ast::ImplItemKind::Macro, walk_impl_item); + method!(visit_stmt: ast::Stmt, ast::StmtKind::Mac, walk_stmt); + method!(visit_expr: ast::Expr, ast::ExprKind::Mac, walk_expr); + method!(visit_pat: ast::Pat, ast::PatKind::Mac, walk_pat); + method!(visit_ty: ast::Ty, ast::TyKind::Mac, walk_ty); + + fn visit_item(&mut self, item: &ast::Item) { + match item.node { + ast::ItemKind::Mac(..) if item.id == ast::DUMMY_NODE_ID => {} // Scope placeholder + ast::ItemKind::Mac(..) => self.visit_invoc(item.id), + ast::ItemKind::Mod(..) 
=> { + let module_data = ModuleData { + parent: Some(self.current_module.clone()), + macros: RefCell::new(FnvHashMap()), + macros_escape: self.contains_macro_use(&item.attrs), + }; + let orig_module = mem::replace(&mut self.current_module, Rc::new(module_data)); + visit::walk_item(self, item); + self.current_module = orig_module; + } + ast::ItemKind::ExternCrate(..) => { + // We need to error on `#[macro_use] extern crate` when it isn't at the + // crate root, because `$crate` won't work properly. + // FIXME(jseyfried): This will be nicer once `ModuleData` is merged with `ModuleS`. + let is_crate_root = self.current_module.parent.as_ref().unwrap().parent.is_none(); + for def in self.resolver.crate_loader.load_macros(item, is_crate_root) { + match def { + LoadedMacro::Def(def) => self.resolver.add_macro(Mark::root(), def), + LoadedMacro::CustomDerive(name, ext) => { + self.resolver.insert_custom_derive(&name, ext, item.span); + } + } + } + visit::walk_item(self, item); + } + _ => visit::walk_item(self, item), + } + } + + fn visit_block(&mut self, block: &ast::Block) { + let module_data = ModuleData { + parent: Some(self.current_module.clone()), + macros: RefCell::new(FnvHashMap()), + macros_escape: false, + }; + let orig_module = mem::replace(&mut self.current_module, Rc::new(module_data)); + visit::walk_block(self, block); + self.current_module = orig_module; + } +} diff --git a/src/librustc_resolve/resolve_imports.rs b/src/librustc_resolve/resolve_imports.rs index 1e40aa7d18..fe21e52959 100644 --- a/src/librustc_resolve/resolve_imports.rs +++ b/src/librustc_resolve/resolve_imports.rs @@ -8,6 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use self::Determinacy::*; use self::ImportDirectiveSubclass::*; use Module; @@ -26,7 +27,7 @@ use rustc::hir::def::*; use syntax::ast::{NodeId, Name}; use syntax::util::lev_distance::find_best_match_for_name; -use syntax_pos::{Span, DUMMY_SP}; +use syntax_pos::Span; use std::cell::{Cell, RefCell}; @@ -36,25 +37,35 @@ impl<'a> Resolver<'a> { } } +#[derive(Copy, Clone, Debug)] +pub enum Determinacy { + Determined, + Undetermined, +} + /// Contains data for specific types of import directives. #[derive(Clone, Debug)] -pub enum ImportDirectiveSubclass { +pub enum ImportDirectiveSubclass<'a> { SingleImport { target: Name, source: Name, - type_determined: Cell, - value_determined: Cell, + value_result: Cell, Determinacy>>, + type_result: Cell, Determinacy>>, + }, + GlobImport { + is_prelude: bool, + max_vis: Cell, // The visibility of the greatest reexport. + // n.b. `max_vis` is only used in `finalize_import` to check for reexport errors. 
}, - GlobImport { is_prelude: bool }, } -impl ImportDirectiveSubclass { +impl<'a> ImportDirectiveSubclass<'a> { pub fn single(target: Name, source: Name) -> Self { SingleImport { target: target, source: source, - type_determined: Cell::new(false), - value_determined: Cell::new(false), + type_result: Cell::new(Err(Undetermined)), + value_result: Cell::new(Err(Undetermined)), } } } @@ -63,11 +74,12 @@ impl ImportDirectiveSubclass { #[derive(Debug,Clone)] pub struct ImportDirective<'a> { pub id: NodeId, + parent: Module<'a>, module_path: Vec, - target_module: Cell>>, // the resolution of `module_path` - subclass: ImportDirectiveSubclass, + imported_module: Cell>>, // the resolution of `module_path` + subclass: ImportDirectiveSubclass<'a>, span: Span, - vis: ty::Visibility, // see note in ImportResolutionPerNamespace about how to use this + vis: Cell, } impl<'a> ImportDirective<'a> { @@ -97,6 +109,7 @@ enum SingleImports<'a> { } impl<'a> Default for SingleImports<'a> { + /// Creates a `SingleImports<'a>` of None type. fn default() -> Self { SingleImports::None } @@ -132,130 +145,175 @@ impl<'a> NameResolution<'a> { _ => None, // The binding could be shadowed by a single import, so it is not known. }) } +} + +impl<'a> Resolver<'a> { + fn resolution(&self, module: Module<'a>, name: Name, ns: Namespace) + -> &'a RefCell> { + *module.resolutions.borrow_mut().entry((name, ns)) + .or_insert_with(|| self.arenas.alloc_name_resolution()) + } + + /// Attempts to resolve the supplied name in the given module for the given namespace. + /// If successful, returns the binding corresponding to the name. + /// Invariant: if `record_used` is `Some`, import resolution must be complete. + pub fn resolve_name_in_module(&mut self, + module: Module<'a>, + name: Name, + ns: Namespace, + allow_private_imports: bool, + record_used: Option) + -> ResolveResult<&'a NameBinding<'a>> { + self.populate_module_if_necessary(module); + + let resolution = self.resolution(module, name, ns); + let resolution = match resolution.borrow_state() { + ::std::cell::BorrowState::Unused => resolution.borrow_mut(), + _ => return Failed(None), // This happens when there is a cycle of imports + }; + + let new_import_semantics = self.new_import_semantics; + let is_disallowed_private_import = |binding: &NameBinding| { + !new_import_semantics && !allow_private_imports && // disallowed + binding.vis != ty::Visibility::Public && binding.is_import() // non-`pub` import + }; + + if let Some(span) = record_used { + if let Some(binding) = resolution.binding { + if is_disallowed_private_import(binding) { + return Failed(None); + } + if self.record_use(name, ns, binding, span) { + return Success(self.dummy_binding); + } + if !self.is_accessible(binding.vis) { + self.privacy_errors.push(PrivacyError(span, name, binding)); + } + } + + return resolution.binding.map(Success).unwrap_or(Failed(None)); + } + + // If the resolution doesn't depend on glob definability, check privacy and return. + if let Some(result) = self.try_result(&resolution, ns) { + return result.and_then(|binding| { + if self.is_accessible(binding.vis) && !is_disallowed_private_import(binding) || + binding.is_extern_crate() { // c.f. 
issue #37020 + Success(binding) + } else { + Failed(None) + } + }); + } + + // Check if the globs are determined + for directive in module.globs.borrow().iter() { + if self.is_accessible(directive.vis.get()) { + if let Some(module) = directive.imported_module.get() { + let result = self.resolve_name_in_module(module, name, ns, true, None); + if let Indeterminate = result { + return Indeterminate; + } + } else { + return Indeterminate; + } + } + } + + Failed(None) + } // Returns Some(the resolution of the name), or None if the resolution depends // on whether more globs can define the name. - fn try_result(&self, ns: Namespace, allow_private_imports: bool) + fn try_result(&mut self, resolution: &NameResolution<'a>, ns: Namespace) -> Option>> { - match self.binding { + match resolution.binding { Some(binding) if !binding.is_glob_import() => - return Some(Success(binding)), - _ => {} // Items and single imports are not shadowable + return Some(Success(binding)), // Items and single imports are not shadowable. + _ => {} }; // Check if a single import can still define the name. - match self.single_imports { - SingleImports::None => {}, + match resolution.single_imports { SingleImports::AtLeastOne => return Some(Indeterminate), - SingleImports::MaybeOne(directive) => { - // If (1) we don't allow private imports, (2) no public single import can define - // the name, and (3) no public glob has defined the name, the resolution depends - // on whether more globs can define the name. - if !allow_private_imports && directive.vis != ty::Visibility::Public && - !self.binding.map(NameBinding::is_pseudo_public).unwrap_or(false) { - return None; - } - - let target_module = match directive.target_module.get() { - Some(target_module) => target_module, + SingleImports::MaybeOne(directive) if self.is_accessible(directive.vis.get()) => { + let module = match directive.imported_module.get() { + Some(module) => module, None => return Some(Indeterminate), }; let name = match directive.subclass { SingleImport { source, .. } => source, GlobImport { .. } => unreachable!(), }; - match target_module.resolve_name(name, ns, false) { + match self.resolve_name_in_module(module, name, ns, true, None) { Failed(_) => {} _ => return Some(Indeterminate), } } + SingleImports::MaybeOne(_) | SingleImports::None => {}, } - self.binding.map(Success) - } -} - -impl<'a> ::ModuleS<'a> { - fn resolution(&self, name: Name, ns: Namespace) -> &'a RefCell> { - *self.resolutions.borrow_mut().entry((name, ns)) - .or_insert_with(|| self.arenas.alloc_name_resolution()) - } - - pub fn resolve_name(&self, name: Name, ns: Namespace, allow_private_imports: bool) - -> ResolveResult<&'a NameBinding<'a>> { - let resolution = self.resolution(name, ns); - let resolution = match resolution.borrow_state() { - ::std::cell::BorrowState::Unused => resolution.borrow_mut(), - _ => return Failed(None), // This happens when there is a cycle of imports - }; - - if let Some(result) = resolution.try_result(ns, allow_private_imports) { - // If the resolution doesn't depend on glob definability, check privacy and return. 
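(Aside: an illustrative program, not from this patch, showing the situation the glob handling above has to cope with. Under the new import semantics two globs may import the same name, and the conflict is only reported if the ambiguous name is actually used, via the `NameBindingKind::Ambiguity`/`AmbiguityError` machinery added elsewhere in this patch.)

mod a { pub struct Thing; }
mod b { pub struct Thing; }

use a::*;
use b::*;

// `Thing` could come from either glob, so this use is reported as ambiguous,
// with notes pointing at both `use` directives; an explicit `use a::Thing;`
// would disambiguate.
fn f(_: Thing) {}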
- return result.and_then(|binding| { - let allowed = allow_private_imports || !binding.is_import() || - binding.is_pseudo_public(); - if allowed { Success(binding) } else { Failed(None) } - }); - } - - // Check if the globs are determined - for directive in self.globs.borrow().iter() { - if !allow_private_imports && directive.vis != ty::Visibility::Public { continue } - match directive.target_module.get() { - None => return Indeterminate, - Some(target_module) => match target_module.resolve_name(name, ns, false) { - Indeterminate => return Indeterminate, - _ => {} - } - } - } - - Failed(None) + resolution.binding.map(Success) } - pub fn add_import_directive(&self, + // Add an import directive to the current module. + pub fn add_import_directive(&mut self, module_path: Vec, - subclass: ImportDirectiveSubclass, + subclass: ImportDirectiveSubclass<'a>, span: Span, id: NodeId, vis: ty::Visibility) { + let current_module = self.current_module; let directive = self.arenas.alloc_import_directive(ImportDirective { + parent: current_module, module_path: module_path, - target_module: Cell::new(None), + imported_module: Cell::new(None), subclass: subclass, span: span, id: id, - vis: vis, + vis: Cell::new(vis), }); - self.unresolved_imports.borrow_mut().push(directive); + self.indeterminate_imports.push(directive); match directive.subclass { SingleImport { target, .. } => { for &ns in &[ValueNS, TypeNS] { - self.resolution(target, ns).borrow_mut().single_imports - .add_directive(directive); + let mut resolution = self.resolution(current_module, target, ns).borrow_mut(); + resolution.single_imports.add_directive(directive); } } // We don't add prelude imports to the globs since they only affect lexical scopes, // which are not relevant to import resolution. - GlobImport { is_prelude: true } => {} - GlobImport { .. } => self.globs.borrow_mut().push(directive), + GlobImport { is_prelude: true, .. } => {} + GlobImport { .. } => self.current_module.globs.borrow_mut().push(directive), } } -} -impl<'a> Resolver<'a> { // Given a binding and an import directive that resolves to it, // return the corresponding binding defined by the import directive. fn import(&mut self, binding: &'a NameBinding<'a>, directive: &'a ImportDirective<'a>) -> NameBinding<'a> { + let vis = if binding.pseudo_vis().is_at_least(directive.vis.get(), self) || + !directive.is_glob() && binding.is_extern_crate() { // c.f. `PRIVATE_IN_PUBLIC` + directive.vis.get() + } else { + binding.pseudo_vis() + }; + + if let GlobImport { ref max_vis, .. 
} = directive.subclass { + if vis == directive.vis.get() || vis.is_at_least(max_vis.get(), self) { + max_vis.set(vis) + } + } + NameBinding { kind: NameBindingKind::Import { binding: binding, directive: directive, + used: Cell::new(false), }, span: directive.span, - vis: directive.vis, + vis: vis, } } @@ -265,10 +323,28 @@ impl<'a> Resolver<'a> { where T: ToNameBinding<'a> { let binding = self.arenas.alloc_name_binding(binding.to_name_binding()); - self.update_resolution(module, name, ns, |_, resolution| { + self.update_resolution(module, name, ns, |this, resolution| { if let Some(old_binding) = resolution.binding { if binding.is_glob_import() { - resolution.duplicate_globs.push(binding); + if !this.new_import_semantics || !old_binding.is_glob_import() { + resolution.duplicate_globs.push(binding); + } else if binding.def() != old_binding.def() { + resolution.binding = Some(this.arenas.alloc_name_binding(NameBinding { + kind: NameBindingKind::Ambiguity { + b1: old_binding, + b2: binding, + }, + vis: if old_binding.vis.is_at_least(binding.vis, this) { + old_binding.vis + } else { + binding.vis + }, + span: old_binding.span, + })); + } else if !old_binding.vis.is_at_least(binding.vis, this) { + // We are glob-importing the same item but with greater visibility. + resolution.binding = Some(binding); + } } else if old_binding.is_glob_import() { resolution.duplicate_globs.push(old_binding); resolution.binding = Some(binding); @@ -290,24 +366,30 @@ impl<'a> Resolver<'a> { { // Ensure that `resolution` isn't borrowed when defining in the module's glob importers, // during which the resolution might end up getting re-defined via a glob cycle. - let (new_binding, t) = { - let mut resolution = &mut *module.resolution(name, ns).borrow_mut(); - let was_known = resolution.binding().is_some(); + let (binding, t) = { + let mut resolution = &mut *self.resolution(module, name, ns).borrow_mut(); + let old_binding = resolution.binding(); let t = f(self, resolution); - if was_known { return t; } match resolution.binding() { - Some(binding) => (binding, t), + _ if !self.new_import_semantics && old_binding.is_some() => return t, None => return t, + Some(binding) => match old_binding { + Some(old_binding) if old_binding as *const _ == binding as *const _ => return t, + _ => (binding, t), + } } }; - // Define `new_binding` in `module`s glob importers. - if new_binding.is_importable() && new_binding.is_pseudo_public() { - for &(importer, directive) in module.glob_importers.borrow_mut().iter() { - let imported_binding = self.import(new_binding, directive); - let _ = self.try_define(importer, name, ns, imported_binding); + // Define `binding` in `module`s glob importers. + for directive in module.glob_importers.borrow_mut().iter() { + if match self.new_import_semantics { + true => self.is_accessible_from(binding.vis, directive.parent), + false => binding.vis == ty::Visibility::Public, + } { + let imported_binding = self.import(binding, directive); + let _ = self.try_define(directive.parent, name, ns, imported_binding); } } @@ -315,14 +397,6 @@ impl<'a> Resolver<'a> { } } -struct ImportResolvingError<'a> { - /// Module where the error happened - source_module: Module<'a>, - import_directive: &'a ImportDirective<'a>, - span: Span, - help: String, -} - struct ImportResolver<'a, 'b: 'a> { resolver: &'a mut Resolver<'b>, } @@ -359,104 +433,67 @@ impl<'a, 'b:'a> ImportResolver<'a, 'b> { /// point iteration. 
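(Aside: stripped of the resolver specifics, the rewritten driver below is a crate-wide fixed-point loop over the indeterminate imports. The helper here is a schematic illustration with an invented name and signature, not code from this patch.)

// Retry the still-undetermined items until one full pass makes no progress,
// then return whatever is genuinely stuck.
fn fixed_point<T>(mut pending: Vec<T>, mut try_resolve: impl FnMut(&T) -> bool) -> Vec<T> {
    loop {
        let before = pending.len();
        pending.retain(|item| !try_resolve(item)); // keep only items that stayed unresolved
        if pending.len() == before {
            return pending;
        }
    }
}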
fn resolve_imports(&mut self) { let mut i = 0; - let mut prev_unresolved_imports = 0; - let mut errors = Vec::new(); + let mut prev_num_indeterminates = self.indeterminate_imports.len() + 1; - loop { - debug!("(resolving imports) iteration {}, {} imports left", i, self.unresolved_imports); + while self.indeterminate_imports.len() < prev_num_indeterminates { + prev_num_indeterminates = self.indeterminate_imports.len(); + debug!("(resolving imports) iteration {}, {} imports left", i, prev_num_indeterminates); - // Attempt to resolve imports in all local modules. - for module in self.arenas.local_modules().iter() { - self.current_module = module; - self.resolve_imports_in_current_module(&mut errors); - } + let mut imports = Vec::new(); + ::std::mem::swap(&mut imports, &mut self.indeterminate_imports); - if self.unresolved_imports == 0 { - debug!("(resolving imports) success"); - for module in self.arenas.local_modules().iter() { - self.finalize_resolutions_in(module, false); + for import in imports { + match self.resolve_import(&import) { + Failed(_) => self.determined_imports.push(import), + Indeterminate => self.indeterminate_imports.push(import), + Success(()) => self.determined_imports.push(import), } - break; } - if self.unresolved_imports == prev_unresolved_imports { - // resolving failed - // Report unresolved imports only if no hard error was already reported - // to avoid generating multiple errors on the same import. - // Imports that are still indeterminate at this point are actually blocked - // by errored imports, so there is no point reporting them. - for module in self.arenas.local_modules().iter() { - self.finalize_resolutions_in(module, errors.len() == 0); - } - for e in errors { - self.import_resolving_error(e) - } - break; + i += 1; + } + + for module in self.arenas.local_modules().iter() { + self.finalize_resolutions_in(module); + } + + let mut errors = false; + for i in 0 .. self.determined_imports.len() { + let import = self.determined_imports[i]; + if let Failed(err) = self.finalize_import(import) { + errors = true; + let (span, help) = match err { + Some((span, msg)) => (span, msg), + None => continue, + }; + + // If the error is a single failed import then create a "fake" import + // resolution for it so that later resolve stages won't complain. + self.import_dummy_binding(import); + let path = import_path_to_string(&import.module_path, &import.subclass); + let error = ResolutionError::UnresolvedImport(Some((&path, &help))); + resolve_error(self.resolver, span, error); } + } - i += 1; - prev_unresolved_imports = self.unresolved_imports; + // Report unresolved imports only if no hard error was already reported + // to avoid generating multiple errors on the same import. + if !errors { + if let Some(import) = self.indeterminate_imports.iter().next() { + let error = ResolutionError::UnresolvedImport(None); + resolve_error(self.resolver, import.span, error); + } } } // Define a "dummy" resolution containing a Def::Err as a placeholder for a // failed resolution - fn import_dummy_binding(&mut self, - source_module: Module<'b>, - directive: &'b ImportDirective<'b>) { + fn import_dummy_binding(&mut self, directive: &'b ImportDirective<'b>) { if let SingleImport { target, .. 
} = directive.subclass { - let dummy_binding = self.arenas.alloc_name_binding(NameBinding { - kind: NameBindingKind::Def(Def::Err), - span: DUMMY_SP, - vis: ty::Visibility::Public, - }); + let dummy_binding = self.dummy_binding; let dummy_binding = self.import(dummy_binding, directive); - - let _ = self.try_define(source_module, target, ValueNS, dummy_binding.clone()); - let _ = self.try_define(source_module, target, TypeNS, dummy_binding); - } - } - - /// Resolves an `ImportResolvingError` into the correct enum discriminant - /// and passes that on to `resolve_error`. - fn import_resolving_error(&mut self, e: ImportResolvingError<'b>) { - // If the error is a single failed import then create a "fake" import - // resolution for it so that later resolve stages won't complain. - self.import_dummy_binding(e.source_module, e.import_directive); - let path = import_path_to_string(&e.import_directive.module_path, - &e.import_directive.subclass); - resolve_error(self.resolver, - e.span, - ResolutionError::UnresolvedImport(Some((&path, &e.help)))); - } - - /// Attempts to resolve imports for the given module only. - fn resolve_imports_in_current_module(&mut self, errors: &mut Vec>) { - let mut imports = Vec::new(); - let mut unresolved_imports = self.current_module.unresolved_imports.borrow_mut(); - ::std::mem::swap(&mut imports, &mut unresolved_imports); - - for import_directive in imports { - match self.resolve_import(&import_directive) { - Failed(err) => { - let (span, help) = match err { - Some((span, msg)) => (span, format!(". {}", msg)), - None => (import_directive.span, String::new()), - }; - errors.push(ImportResolvingError { - source_module: self.current_module, - import_directive: import_directive, - span: span, - help: help, - }); - } - Indeterminate => unresolved_imports.push(import_directive), - Success(()) => { - // Decrement the count of unresolved imports. - assert!(self.unresolved_imports >= 1); - self.unresolved_imports -= 1; - } - } + let _ = self.try_define(directive.parent, target, ValueNS, dummy_binding.clone()); + let _ = self.try_define(directive.parent, target, TypeNS, dummy_binding); } } @@ -470,125 +507,208 @@ impl<'a, 'b:'a> ImportResolver<'a, 'b> { names_to_string(&directive.module_path), module_to_string(self.current_module)); - let target_module = match directive.target_module.get() { - Some(module) => module, - _ => match self.resolve_module_path(&directive.module_path, - DontUseLexicalScope, - directive.span) { + self.current_module = directive.parent; + + let module = if let Some(module) = directive.imported_module.get() { + module + } else { + let vis = directive.vis.get(); + // For better failure detection, pretend that the import will not define any names + // while resolving its module path. + directive.vis.set(ty::Visibility::PrivateExternal); + let result = + self.resolve_module_path(&directive.module_path, DontUseLexicalScope, None); + directive.vis.set(vis); + + match result { Success(module) => module, Indeterminate => return Indeterminate, Failed(err) => return Failed(err), - }, + } }; - directive.target_module.set(Some(target_module)); - let (source, target, value_determined, type_determined) = match directive.subclass { - SingleImport { source, target, ref value_determined, ref type_determined } => - (source, target, value_determined, type_determined), - GlobImport { .. 
} => return self.resolve_glob_import(target_module, directive), + directive.imported_module.set(Some(module)); + let (source, target, value_result, type_result) = match directive.subclass { + SingleImport { source, target, ref value_result, ref type_result } => + (source, target, value_result, type_result), + GlobImport { .. } => { + self.resolve_glob_import(directive); + return Success(()); + } }; - // We need to resolve both namespaces for this to succeed. - let value_result = self.resolve_name_in_module(target_module, source, ValueNS, false, true); - let type_result = self.resolve_name_in_module(target_module, source, TypeNS, false, true); - - let module = self.current_module; - let mut privacy_error = true; - for &(ns, result, determined) in &[(ValueNS, &value_result, value_determined), - (TypeNS, &type_result, type_determined)] { - match *result { - Failed(..) if !determined.get() => { - determined.set(true); - self.update_resolution(module, target, ns, |_, resolution| { + let mut indeterminate = false; + for &(ns, result) in &[(ValueNS, value_result), (TypeNS, type_result)] { + if let Err(Undetermined) = result.get() { + result.set({ + match self.resolve_name_in_module(module, source, ns, false, None) { + Success(binding) => Ok(binding), + Indeterminate => Err(Undetermined), + Failed(_) => Err(Determined), + } + }); + } else { + continue + }; + + match result.get() { + Err(Undetermined) => indeterminate = true, + Err(Determined) => { + self.update_resolution(directive.parent, target, ns, |_, resolution| { resolution.single_imports.directive_failed() }); } - Success(binding) if !binding.is_importable() => { + Ok(binding) if !binding.is_importable() => { let msg = format!("`{}` is not directly importable", target); struct_span_err!(self.session, directive.span, E0253, "{}", &msg) .span_label(directive.span, &format!("cannot be imported directly")) .emit(); // Do not import this illegal binding. Import a dummy binding and pretend // everything is fine - self.import_dummy_binding(module, directive); + self.import_dummy_binding(directive); return Success(()); } - Success(binding) if !self.is_accessible(binding.vis) => {} - Success(binding) if !determined.get() => { - determined.set(true); + Ok(binding) => { let imported_binding = self.import(binding, directive); - let conflict = self.try_define(module, target, ns, imported_binding); + let conflict = self.try_define(directive.parent, target, ns, imported_binding); if let Err(old_binding) = conflict { let binding = &self.import(binding, directive); - self.report_conflict(module, target, ns, binding, old_binding); + self.report_conflict(directive.parent, target, ns, binding, old_binding); } - privacy_error = false; } - Success(_) => privacy_error = false, - _ => {} } } - match (&value_result, &type_result) { - (&Indeterminate, _) | (_, &Indeterminate) => return Indeterminate, - (&Failed(_), &Failed(_)) => { - let resolutions = target_module.resolutions.borrow(); - let names = resolutions.iter().filter_map(|(&(ref name, _), resolution)| { - if *name == source { return None; } // Never suggest the same name + if indeterminate { Indeterminate } else { Success(()) } + } + + fn finalize_import(&mut self, directive: &'b ImportDirective<'b>) -> ResolveResult<()> { + self.current_module = directive.parent; + + let ImportDirective { ref module_path, span, .. 
} = *directive; + let module_result = self.resolve_module_path(&module_path, DontUseLexicalScope, Some(span)); + let module = match module_result { + Success(module) => module, + Indeterminate => return Indeterminate, + Failed(err) => { + let self_module = self.module_map[&self.current_module.normal_ancestor_id.unwrap()]; + + let resolve_from_self_result = self.resolve_module_path_from_root( + &self_module, &module_path, 0, Some(span)); + + return if let Success(_) = resolve_from_self_result { + let msg = format!("Did you mean `self::{}`?", &names_to_string(module_path)); + Failed(Some((span, msg))) + } else { + Failed(err) + }; + }, + }; + + let (name, value_result, type_result) = match directive.subclass { + SingleImport { source, ref value_result, ref type_result, .. } => + (source, value_result.get(), type_result.get()), + GlobImport { .. } if module.def_id() == directive.parent.def_id() => { + // Importing a module into itself is not allowed. + let msg = "Cannot glob-import a module into itself.".into(); + return Failed(Some((directive.span, msg))); + } + GlobImport { is_prelude, ref max_vis } => { + if !is_prelude && + max_vis.get() != ty::Visibility::PrivateExternal && // Allow empty globs. + !max_vis.get().is_at_least(directive.vis.get(), self) { + let msg = "A non-empty glob must import something with the glob's visibility"; + self.session.span_err(directive.span, msg); + } + return Success(()); + } + }; + + for &(ns, result) in &[(ValueNS, value_result), (TypeNS, type_result)] { + if let Ok(binding) = result { + if self.record_use(name, ns, binding, directive.span) { + self.resolution(module, name, ns).borrow_mut().binding = + Some(self.dummy_binding); + } + } + } + + if value_result.is_err() && type_result.is_err() { + let (value_result, type_result); + value_result = self.resolve_name_in_module(module, name, ValueNS, false, Some(span)); + type_result = self.resolve_name_in_module(module, name, TypeNS, false, Some(span)); + + return if let (Failed(_), Failed(_)) = (value_result, type_result) { + let resolutions = module.resolutions.borrow(); + let names = resolutions.iter().filter_map(|(&(ref n, _), resolution)| { + if *n == name { return None; } // Never suggest the same name match *resolution.borrow() { - NameResolution { binding: Some(_), .. } => Some(name), + NameResolution { binding: Some(_), .. } => Some(n), NameResolution { single_imports: SingleImports::None, .. } => None, - _ => Some(name), + _ => Some(n), } }); - let lev_suggestion = match find_best_match_for_name(names, &source.as_str(), None) { + let lev_suggestion = match find_best_match_for_name(names, &name.as_str(), None) { Some(name) => format!(". Did you mean to use `{}`?", name), None => "".to_owned(), }; - let module_str = module_to_string(target_module); + let module_str = module_to_string(module); let msg = if &module_str == "???" { - format!("There is no `{}` in the crate root{}", source, lev_suggestion) + format!("no `{}` in the root{}", name, lev_suggestion) } else { - format!("There is no `{}` in `{}`{}", source, module_str, lev_suggestion) + format!("no `{}` in `{}`{}", name, module_str, lev_suggestion) }; - return Failed(Some((directive.span, msg))); + Failed(Some((directive.span, msg))) + } else { + // `resolve_name_in_module` reported a privacy error. 
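The reworked `resolve_imports` above drops the per-module `unresolved_imports` counter in favour of a fixed-point worklist: the indeterminate imports are swapped out wholesale, re-resolved, and the loop ends once an iteration stops shrinking the set. A minimal sketch of that shape, with a hypothetical `Outcome` enum standing in for `ResolveResult` and toy string imports:

    use std::mem;

    enum Outcome {
        Determined,
        Indeterminate,
    }

    struct Resolver {
        indeterminate: Vec<String>,
        determined: Vec<String>,
    }

    impl Resolver {
        // Stand-in for `resolve_import`: names starting with '?' stay
        // indeterminate for one more round in this toy model.
        fn resolve_one(&mut self, import: &mut String) -> Outcome {
            if import.starts_with('?') {
                import.remove(0);
                Outcome::Indeterminate
            } else {
                Outcome::Determined
            }
        }

        // Fixed-point loop: swap the worklist out, re-resolve it, and stop as
        // soon as an iteration fails to shrink the indeterminate set.
        fn resolve_all(&mut self) {
            let mut prev = self.indeterminate.len() + 1;
            while self.indeterminate.len() < prev {
                prev = self.indeterminate.len();
                let worklist = mem::replace(&mut self.indeterminate, Vec::new());
                for mut import in worklist {
                    match self.resolve_one(&mut import) {
                        Outcome::Determined => self.determined.push(import),
                        Outcome::Indeterminate => self.indeterminate.push(import),
                    }
                }
            }
        }
    }

    fn main() {
        let mut r = Resolver {
            indeterminate: vec!["?foo".to_string(), "bar".to_string()],
            determined: Vec::new(),
        };
        r.resolve_all();
        println!("resolved {:?}, still blocked {:?}", r.determined, r.indeterminate);
    }

Anything still in the indeterminate set after the loop is, by construction, blocked on an import that itself failed, which is why only one generic "unresolved import" error is reported for it later.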
+ self.import_dummy_binding(directive); + Success(()) } - _ => (), } - if privacy_error { - for &(ns, result) in &[(ValueNS, &value_result), (TypeNS, &type_result)] { - let binding = match *result { Success(binding) => binding, _ => continue }; - self.privacy_errors.push(PrivacyError(directive.span, source, binding)); - let imported_binding = self.import(binding, directive); - let _ = self.try_define(module, target, ns, imported_binding); + let session = self.session; + let reexport_error = || { + let msg = format!("`{}` is private, and cannot be reexported", name); + let note_msg = + format!("consider marking `{}` as `pub` in the imported module", name); + struct_span_err!(session, directive.span, E0364, "{}", &msg) + .span_note(directive.span, ¬e_msg) + .emit(); + }; + + let extern_crate_lint = || { + let msg = format!("extern crate `{}` is private, and cannot be reexported \ + (error E0364), consider declaring with `pub`", + name); + session.add_lint(PRIVATE_IN_PUBLIC, directive.id, directive.span, msg); + }; + + match (value_result, type_result) { + // With `#![feature(item_like_imports)]`, all namespaces + // must be re-exported with extra visibility for an error to occur. + (Ok(value_binding), Ok(type_binding)) if self.new_import_semantics => { + let vis = directive.vis.get(); + if !value_binding.pseudo_vis().is_at_least(vis, self) && + !type_binding.pseudo_vis().is_at_least(vis, self) { + reexport_error(); + } else if type_binding.is_extern_crate() && + !type_binding.vis.is_at_least(vis, self) { + extern_crate_lint(); + } } - } - match (&value_result, &type_result) { - (&Success(binding), _) if !binding.pseudo_vis().is_at_least(directive.vis, self) && - self.is_accessible(binding.vis) => { - let msg = format!("`{}` is private, and cannot be reexported", source); - let note_msg = format!("consider marking `{}` as `pub` in the imported module", - source); - struct_span_err!(self.session, directive.span, E0364, "{}", &msg) - .span_note(directive.span, ¬e_msg) - .emit(); + (Ok(binding), _) if !binding.pseudo_vis().is_at_least(directive.vis.get(), self) => { + reexport_error(); } - (_, &Success(binding)) if !binding.pseudo_vis().is_at_least(directive.vis, self) && - self.is_accessible(binding.vis) => { + (_, Ok(binding)) if !binding.pseudo_vis().is_at_least(directive.vis.get(), self) => { if binding.is_extern_crate() { - let msg = format!("extern crate `{}` is private, and cannot be reexported \ - (error E0364), consider declaring with `pub`", - source); - self.session.add_lint(PRIVATE_IN_PUBLIC, directive.id, directive.span, msg); + extern_crate_lint(); } else { - let msg = format!("`{}` is private, and cannot be reexported", source); - let note_msg = - format!("consider declaring type or module `{}` with `pub`", source); - struct_span_err!(self.session, directive.span, E0365, "{}", &msg) - .span_note(directive.span, ¬e_msg) + struct_span_err!(self.session, directive.span, E0365, + "`{}` is private, and cannot be reexported", name) + .span_label(directive.span, &format!("reexport of private `{}`", name)) + .note(&format!("consider declaring type or module `{}` with `pub`", name)) .emit(); } } @@ -599,9 +719,9 @@ impl<'a, 'b:'a> ImportResolver<'a, 'b> { // Record what this import resolves to for later uses in documentation, // this may resolve to either a value or a type, but for documentation // purposes it's good enough to just favor one over the other. 
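`import_dummy_binding` above is the error-recovery half of this: once an import has produced a hard error, the target name is defined with a `Def::Err` placeholder so later lookups succeed and duplicate "unresolved import" diagnostics do not pile up. A rough illustration of the same idea over a plain string-keyed table (the types here are made up, not the resolver's):

    use std::collections::HashMap;

    #[derive(Debug, PartialEq)]
    enum Binding {
        Real(String),
        // Placeholder recorded for an import that already produced a hard error.
        Error,
    }

    struct Table {
        names: HashMap<String, Binding>,
        errors: Vec<String>,
    }

    impl Table {
        fn import(&mut self, name: &str, source: Option<&str>) {
            match source {
                Some(path) => {
                    self.names.insert(name.to_string(), Binding::Real(path.to_string()));
                }
                None => {
                    // Report once, then poison the name so later uses resolve to
                    // the placeholder instead of producing more errors.
                    self.errors.push(format!("unresolved import `{}`", name));
                    self.names.insert(name.to_string(), Binding::Error);
                }
            }
        }

        fn lookup(&self, name: &str) -> Option<&Binding> {
            self.names.get(name)
        }
    }

    fn main() {
        let mut t = Table { names: HashMap::new(), errors: Vec::new() };
        t.import("foo", None);                 // fails, but still defines a placeholder
        assert_eq!(t.lookup("foo"), Some(&Binding::Error));
        assert_eq!(t.errors.len(), 1);         // exactly one diagnostic
    }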
- let def = match type_result.success().and_then(NameBinding::def) { + let def = match type_result.ok().map(NameBinding::def) { Some(def) => def, - None => value_result.success().and_then(NameBinding::def).unwrap(), + None => value_result.ok().map(NameBinding::def).unwrap(), }; let path_resolution = PathResolution::new(def); self.def_map.insert(directive.id, path_resolution); @@ -610,57 +730,46 @@ impl<'a, 'b:'a> ImportResolver<'a, 'b> { return Success(()); } - // Resolves a glob import. Note that this function cannot fail; it either - // succeeds or bails out (as importing * from an empty module or a module - // that exports nothing is valid). target_module is the module we are - // actually importing, i.e., `foo` in `use foo::*`. - fn resolve_glob_import(&mut self, target_module: Module<'b>, directive: &'b ImportDirective<'b>) - -> ResolveResult<()> { - if let Some(Def::Trait(_)) = target_module.def { - self.session.span_err(directive.span, "items in traits are not importable."); - } - - let module = self.current_module; - if module.def_id() == target_module.def_id() { - // This means we are trying to glob import a module into itself, and it is a no-go - let msg = "Cannot glob-import a module into itself.".into(); - return Failed(Some((directive.span, msg))); - } - self.populate_module_if_necessary(target_module); + fn resolve_glob_import(&mut self, directive: &'b ImportDirective<'b>) { + let module = directive.imported_module.get().unwrap(); + self.populate_module_if_necessary(module); - if let GlobImport { is_prelude: true } = directive.subclass { - self.prelude = Some(target_module); - return Success(()); + if let Some(Def::Trait(_)) = module.def() { + self.session.span_err(directive.span, "items in traits are not importable."); + return; + } else if module.def_id() == directive.parent.def_id() { + return; + } else if let GlobImport { is_prelude: true, .. } = directive.subclass { + self.prelude = Some(module); + return; } - // Add to target_module's glob_importers - target_module.glob_importers.borrow_mut().push((module, directive)); + // Add to module's glob_importers + module.glob_importers.borrow_mut().push(directive); // Ensure that `resolutions` isn't borrowed during `try_define`, // since it might get updated via a glob cycle. - let bindings = target_module.resolutions.borrow().iter().filter_map(|(name, resolution)| { + let bindings = module.resolutions.borrow().iter().filter_map(|(name, resolution)| { resolution.borrow().binding().map(|binding| (*name, binding)) }).collect::>(); for ((name, ns), binding) in bindings { - if binding.is_importable() && binding.is_pseudo_public() { + if binding.pseudo_vis() == ty::Visibility::Public || + self.new_import_semantics && self.is_accessible(binding.vis) { let imported_binding = self.import(binding, directive); - let _ = self.try_define(module, name, ns, imported_binding); + let _ = self.try_define(directive.parent, name, ns, imported_binding); } } // Record the destination of this import - if let Some(did) = target_module.def_id() { + if let Some(did) = module.def_id() { let resolution = PathResolution::new(Def::Mod(did)); self.def_map.insert(directive.id, resolution); } - - debug!("(resolving glob import) successfully resolved import"); - return Success(()); } // Miscellaneous post-processing, including recording reexports, reporting conflicts, // reporting the PRIVATE_IN_PUBLIC lint, and reporting unresolved imports. 
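Note how `resolve_glob_import` above snapshots the target module's `resolutions` into a `Vec` before calling `try_define`: defining a name can feed back into the same map through a glob cycle, and a still-live `RefCell` borrow would panic. The pattern reduced to a toy `RefCell<HashMap>` (the names and the "re-export" step are hypothetical):

    use std::cell::RefCell;
    use std::collections::HashMap;

    fn main() {
        let resolutions: RefCell<HashMap<String, String>> = RefCell::new(HashMap::new());
        resolutions.borrow_mut().insert("a".to_string(), "crate::a".to_string());
        resolutions.borrow_mut().insert("b".to_string(), "crate::b".to_string());

        // Snapshot first: the shared borrow ends before any mutation happens.
        let snapshot: Vec<(String, String)> = resolutions
            .borrow()
            .iter()
            .map(|(k, v)| (k.clone(), v.clone()))
            .collect();

        // Now it is safe to define new names, even if (as in the real resolver)
        // doing so could re-enter the same map via another glob import.
        for (name, def) in snapshot {
            resolutions
                .borrow_mut()
                .entry(format!("reexported_{}", name))
                .or_insert(def);
        }

        println!("{:?}", resolutions.borrow());
    }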
- fn finalize_resolutions_in(&mut self, module: Module<'b>, report_unresolved_imports: bool) { + fn finalize_resolutions_in(&mut self, module: Module<'b>) { // Since import resolution is finished, globs will not define any more names. *module.globs.borrow_mut() = Vec::new(); @@ -673,20 +782,23 @@ impl<'a, 'b:'a> ImportResolver<'a, 'b> { }; // Report conflicts - for duplicate_glob in resolution.duplicate_globs.iter() { - // FIXME #31337: We currently allow items to shadow glob-imported re-exports. - if !binding.is_import() { - if let NameBindingKind::Import { binding, .. } = duplicate_glob.kind { - if binding.is_import() { continue } + if !self.new_import_semantics { + for duplicate_glob in resolution.duplicate_globs.iter() { + // FIXME #31337: We currently allow items to shadow glob-imported re-exports. + if !binding.is_import() { + if let NameBindingKind::Import { binding, .. } = duplicate_glob.kind { + if binding.is_import() { continue } + } } - } - self.report_conflict(module, name, ns, duplicate_glob, binding); + self.report_conflict(module, name, ns, duplicate_glob, binding); + } } if binding.vis == ty::Visibility::Public && (binding.is_import() || binding.is_extern_crate()) { - if let Some(def) = binding.def() { + let def = binding.def(); + if def != Def::Err { reexports.push(Export { name: name, def_id: def.def_id() }); } } @@ -708,13 +820,6 @@ impl<'a, 'b:'a> ImportResolver<'a, 'b> { self.export_map.insert(node_id, reexports); } } - - if report_unresolved_imports { - for import in module.unresolved_imports.borrow().iter() { - resolve_error(self.resolver, import.span, ResolutionError::UnresolvedImport(None)); - break; - } - } } } diff --git a/src/librustc_save_analysis/data.rs b/src/librustc_save_analysis/data.rs index 493f766933..fc235aaf92 100644 --- a/src/librustc_save_analysis/data.rs +++ b/src/librustc_save_analysis/data.rs @@ -13,8 +13,9 @@ //! The `Dump` trait can be used together with `DumpVisitor` in order to //! retrieve the data from a crate. -use rustc::hir::def_id::DefId; -use syntax::ast::{CrateNum, NodeId}; +use rustc::hir; +use rustc::hir::def_id::{CrateNum, DefId}; +use syntax::ast::{self, NodeId}; use syntax_pos::Span; pub struct CrateData { @@ -76,6 +77,35 @@ pub enum Data { VariableRefData(VariableRefData), } +#[derive(Eq, PartialEq, Clone, Copy, Debug, RustcEncodable)] +pub enum Visibility { + Public, + Restricted, + Inherited, +} + +impl<'a> From<&'a ast::Visibility> for Visibility { + fn from(v: &'a ast::Visibility) -> Visibility { + match *v { + ast::Visibility::Public => Visibility::Public, + ast::Visibility::Crate(_) => Visibility::Restricted, + ast::Visibility::Restricted { .. } => Visibility::Restricted, + ast::Visibility::Inherited => Visibility::Inherited, + } + } +} + +impl<'a> From<&'a hir::Visibility> for Visibility { + fn from(v: &'a hir::Visibility) -> Visibility { + match *v { + hir::Visibility::Public => Visibility::Public, + hir::Visibility::Crate => Visibility::Restricted, + hir::Visibility::Restricted { .. } => Visibility::Restricted, + hir::Visibility::Inherited => Visibility::Inherited, + } + } +} + /// Data for the prelude of a crate. #[derive(Debug, RustcEncodable)] pub struct CratePreludeData { @@ -103,7 +133,8 @@ pub struct EnumData { pub span: Span, pub scope: NodeId, pub variants: Vec, - + pub visibility: Visibility, + pub docs: String, } /// Data for extern crates. 
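The new save-analysis `Visibility` enum collapses the richer AST and HIR visibilities into three buckets, with both `Crate` and path-restricted visibility mapped to the same variant. The same collapsing conversion as a standalone sketch, with a made-up `SourceVis` in place of `ast::Visibility`:

    // Hypothetical fine-grained visibility, standing in for ast::Visibility.
    enum SourceVis {
        Public,
        Crate,
        Restricted { path: String },
        Inherited,
    }

    #[derive(Debug, PartialEq)]
    enum Visibility {
        Public,
        Restricted,
        Inherited,
    }

    impl<'a> From<&'a SourceVis> for Visibility {
        fn from(v: &'a SourceVis) -> Visibility {
            match *v {
                SourceVis::Public => Visibility::Public,
                // Crate-level and path-restricted visibility are not
                // distinguished in the dumped data; both become `Restricted`.
                SourceVis::Crate | SourceVis::Restricted { .. } => Visibility::Restricted,
                SourceVis::Inherited => Visibility::Inherited,
            }
        }
    }

    fn main() {
        let vis = SourceVis::Restricted { path: "super".to_string() };
        assert_eq!(Visibility::from(&vis), Visibility::Restricted);
        assert_eq!(Visibility::from(&SourceVis::Public), Visibility::Public);
    }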
@@ -135,6 +166,9 @@ pub struct FunctionData { pub span: Span, pub scope: NodeId, pub value: String, + pub visibility: Visibility, + pub parent: Option, + pub docs: String, } /// Data about a function call. @@ -181,6 +215,7 @@ pub struct MacroData { pub span: Span, pub name: String, pub qualname: String, + pub docs: String, } /// Data about a macro use. @@ -215,6 +250,9 @@ pub struct MethodData { pub scope: NodeId, pub value: String, pub decl_id: Option, + pub parent: Option, + pub visibility: Visibility, + pub docs: String, } /// Data for modules. @@ -227,6 +265,8 @@ pub struct ModData { pub scope: NodeId, pub filename: String, pub items: Vec, + pub visibility: Visibility, + pub docs: String, } /// Data for a reference to a module. @@ -248,6 +288,8 @@ pub struct StructData { pub scope: NodeId, pub value: String, pub fields: Vec, + pub visibility: Visibility, + pub docs: String, } #[derive(Debug, RustcEncodable)] @@ -258,7 +300,9 @@ pub struct StructVariantData { pub qualname: String, pub type_value: String, pub value: String, - pub scope: NodeId + pub scope: NodeId, + pub parent: Option, + pub docs: String, } #[derive(Debug, RustcEncodable)] @@ -270,6 +314,8 @@ pub struct TraitData { pub scope: NodeId, pub value: String, pub items: Vec, + pub visibility: Visibility, + pub docs: String, } #[derive(Debug, RustcEncodable)] @@ -280,7 +326,9 @@ pub struct TupleVariantData { pub qualname: String, pub type_value: String, pub value: String, - pub scope: NodeId + pub scope: NodeId, + pub parent: Option, + pub docs: String, } /// Data for a typedef. @@ -291,6 +339,9 @@ pub struct TypeDefData { pub span: Span, pub qualname: String, pub value: String, + pub visibility: Visibility, + pub parent: Option, + pub docs: String, } /// Data for a reference to a type or trait. @@ -308,7 +359,8 @@ pub struct UseData { pub span: Span, pub name: String, pub mod_id: Option, - pub scope: NodeId + pub scope: NodeId, + pub visibility: Visibility, } #[derive(Debug, RustcEncodable)] @@ -316,7 +368,8 @@ pub struct UseGlobData { pub id: NodeId, pub span: Span, pub names: Vec, - pub scope: NodeId + pub scope: NodeId, + pub visibility: Visibility, } /// Data for local and global variables (consts and statics). @@ -328,8 +381,11 @@ pub struct VariableData { pub qualname: String, pub span: Span, pub scope: NodeId, + pub parent: Option, pub value: String, pub type_value: String, + pub visibility: Visibility, + pub docs: String, } #[derive(Debug, RustcEncodable)] diff --git a/src/librustc_save_analysis/dump_visitor.rs b/src/librustc_save_analysis/dump_visitor.rs index 5e967f3250..0869ad168b 100644 --- a/src/librustc_save_analysis/dump_visitor.rs +++ b/src/librustc_save_analysis/dump_visitor.rs @@ -27,16 +27,18 @@ //! is used for recording the output in a format-agnostic way (see CsvDumper //! for an example). 
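The `docs: String` fields added to these structs are populated by the crate's `docs_for_attrs` helper, which is only referenced (not defined) in the hunks below; it takes an item's attributes and, presumably, concatenates the doc-comment text with the decoration stripped. A purely string-based stand-in for that behaviour, not the real function, might look like:

    // Hypothetical stand-in: works on raw doc-comment text rather than the
    // real `syntax::ast::Attribute` values the patch passes in.
    fn docs_for_comments(comments: &[&str]) -> String {
        let mut docs = String::new();
        for c in comments {
            // "/// foo" and "//! foo" both contribute just "foo".
            let stripped = c
                .trim_start_matches("///")
                .trim_start_matches("//!")
                .trim();
            docs.push_str(stripped);
            docs.push('\n');
        }
        docs
    }

    fn main() {
        let attrs = ["/// Adds two numbers.", "/// Returns the sum."];
        print!("{}", docs_for_comments(&attrs));
    }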
+use rustc::hir; use rustc::hir::def::Def; -use rustc::hir::def_id::DefId; -use rustc::hir::map::Node; +use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; +use rustc::hir::map::{Node, NodeItem}; use rustc::session::Session; use rustc::ty::{self, TyCtxt, ImplOrTraitItem, ImplOrTraitItemContainer}; use std::collections::HashSet; +use std::collections::hash_map::DefaultHasher; use std::hash::*; -use syntax::ast::{self, NodeId, PatKind}; +use syntax::ast::{self, NodeId, PatKind, Attribute, CRATE_NODE_ID}; use syntax::parse::token::{self, keywords}; use syntax::visit::{self, Visitor}; use syntax::print::pprust::{path_to_string, ty_to_string, bounds_to_string, generics_to_string}; @@ -44,10 +46,10 @@ use syntax::ptr::P; use syntax::codemap::Spanned; use syntax_pos::*; -use super::{escape, generated_code, SaveContext, PathCollector}; +use super::{escape, generated_code, SaveContext, PathCollector, docs_for_attrs}; use super::data::*; use super::dump::Dump; -use super::external_data::Lower; +use super::external_data::{Lower, make_def_id}; use super::span_utils::SpanUtils; use super::recorder; @@ -94,7 +96,7 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { analysis: analysis, dumper: dumper, span: span_utils.clone(), - cur_scope: 0, + cur_scope: CRATE_NODE_ID, mac_defs: HashSet::new(), mac_uses: HashSet::new(), } @@ -123,7 +125,7 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { let lo_loc = self.span.sess.codemap().lookup_char_pos(c.span.lo); ExternalCrateData { name: c.name, - num: c.number, + num: CrateNum::from_u32(c.number), file_name: SpanUtils::make_path_string(&lo_loc.file.name), } }).collect(); @@ -251,7 +253,7 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { ref_id: None, span: *span, qualname: qualname.to_owned(), - scope: 0 + scope: CRATE_NODE_ID }.lower(self.tcx)); // write the other sub-paths @@ -271,11 +273,13 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { // looks up anything, not just a type fn lookup_type_ref(&self, ref_id: NodeId) -> Option { - match self.tcx.expect_def(ref_id) { - Def::PrimTy(..) => None, - Def::SelfTy(..) => None, - def => Some(def.def_id()), - } + self.tcx.expect_def_or_none(ref_id).and_then(|def| { + match def { + Def::PrimTy(..) => None, + Def::SelfTy(..) => None, + def => Some(def.def_id()), + } + }) } fn process_def_kind(&mut self, @@ -290,8 +294,7 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { let def = self.tcx.expect_def(ref_id); match def { - Def::Mod(_) | - Def::ForeignMod(_) => { + Def::Mod(_) => { self.dumper.mod_ref(ModRefData { span: sub_span.expect("No span found for mod ref"), ref_id: Some(def_id), @@ -300,6 +303,7 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { }.lower(self.tcx)); } Def::Struct(..) | + Def::Union(..) | Def::Enum(..) | Def::TyAlias(..) | Def::AssociatedTy(..) | @@ -311,7 +315,7 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { qualname: String::new() }.lower(self.tcx)); } - Def::Static(_, _) | + Def::Static(..) | Def::Const(_) | Def::AssociatedConst(..) | Def::Local(..) | @@ -350,7 +354,7 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { let mut collector = PathCollector::new(); collector.visit_pat(&arg.pat); let span_utils = self.span.clone(); - for &(id, ref p, _, _) in &collector.collected_paths { + for &(id, ref p, ..) 
in &collector.collected_paths { let typ = self.tcx.node_types().get(&id).unwrap().to_string(); // get the span only for the name of the variable (I hope the path is only ever a // variable name, but who knows?) @@ -364,7 +368,10 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { qualname: format!("{}::{}", qualname, path_to_string(p)), type_value: typ, value: String::new(), - scope: 0 + scope: CRATE_NODE_ID, + parent: None, + visibility: Visibility::Inherited, + docs: String::new(), }.lower(self.tcx)); } } @@ -376,6 +383,8 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { body: Option<&ast::Block>, id: ast::NodeId, name: ast::Name, + vis: Visibility, + attrs: &[Attribute], span: Span) { debug!("process_method: {}:{}", id, name); @@ -393,20 +402,36 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { if !self.span.filter_generated(Some(method_data.span), span) { let container = self.tcx.impl_or_trait_item(self.tcx.map.local_def_id(id)).container(); - let decl_id = if let ImplOrTraitItemContainer::ImplContainer(id) = container { - self.tcx.trait_id_of_impl(id).and_then(|id| { - for item in &**self.tcx.trait_items(id) { - if let &ImplOrTraitItem::MethodTraitItem(ref m) = item { - if m.name == name { - return Some(m.def_id); + let mut trait_id; + let mut decl_id = None; + match container { + ImplOrTraitItemContainer::ImplContainer(id) => { + trait_id = self.tcx.trait_id_of_impl(id); + + match trait_id { + Some(id) => { + for item in &**self.tcx.trait_items(id) { + if let &ImplOrTraitItem::MethodTraitItem(ref m) = item { + if m.name == name { + decl_id = Some(m.def_id); + break; + } + } + } + } + None => { + if let Some(NodeItem(item)) = self.tcx.map.get_if_local(id) { + if let hir::ItemImpl(_, _, _, _, ref ty, _) = item.node { + trait_id = self.lookup_type_ref(ty.id); + } } } } - None - }) - } else { - None - }; + } + ImplOrTraitItemContainer::TraitContainer(id) => { + trait_id = Some(id); + } + } self.dumper.method(MethodData { id: method_data.id, @@ -416,6 +441,9 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { qualname: method_data.qualname.clone(), value: sig_str, decl_id: decl_id, + parent: trait_id, + visibility: vis, + docs: docs_for_attrs(attrs), }.lower(self.tcx)); } @@ -483,7 +511,10 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { name: name, id: param.id, qualname: qualname, - value: String::new() + value: String::new(), + visibility: Visibility::Inherited, + parent: None, + docs: String::new(), }.lower(self.tcx)); } } @@ -527,12 +558,15 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { self.visit_expr(expr); } - fn process_const(&mut self, - id: ast::NodeId, - name: ast::Name, - span: Span, - typ: &ast::Ty, - expr: &ast::Expr) { + fn process_assoc_const(&mut self, + id: ast::NodeId, + name: ast::Name, + span: Span, + typ: &ast::Ty, + expr: &ast::Expr, + parent_id: DefId, + vis: Visibility, + attrs: &[Attribute]) { let qualname = format!("::{}", self.tcx.node_path_str(id)); let sub_span = self.span.sub_span_after_keyword(span, keywords::Const); @@ -546,7 +580,10 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { qualname: qualname, value: self.span.snippet(expr.span), type_value: ty_to_string(&typ), - scope: self.cur_scope + scope: self.cur_scope, + parent: Some(parent_id), + visibility: vis, + docs: docs_for_attrs(attrs), }.lower(self.tcx)); } @@ -588,6 +625,8 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 
'tcx, 'll, D> { scope: self.cur_scope, value: val, fields: fields, + visibility: From::from(&item.vis), + docs: docs_for_attrs(&item.attrs), }.lower(self.tcx)); } @@ -639,7 +678,9 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { qualname: qualname, type_value: enum_data.qualname.clone(), value: val, - scope: enum_data.scope + scope: enum_data.scope, + parent: Some(make_def_id(item.id, &self.tcx.map)), + docs: docs_for_attrs(&variant.node.attrs), }.lower(self.tcx)); } } @@ -662,7 +703,9 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { qualname: qualname, type_value: enum_data.qualname.clone(), value: val, - scope: enum_data.scope + scope: enum_data.scope, + parent: Some(make_def_id(item.id, &self.tcx.map)), + docs: docs_for_attrs(&variant.node.attrs), }.lower(self.tcx)); } } @@ -715,7 +758,8 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { } self.process_generic_params(type_parameters, item.span, "", item.id); for impl_item in impl_items { - self.visit_impl_item(impl_item); + let map = &self.tcx.map; + self.process_impl_item(impl_item, make_def_id(item.id, map)); } } @@ -744,6 +788,8 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { scope: self.cur_scope, value: val, items: methods.iter().map(|i| i.id).collect(), + visibility: From::from(&item.vis), + docs: docs_for_attrs(&item.attrs), }.lower(self.tcx)); } @@ -784,7 +830,8 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { // walk generics and methods self.process_generic_params(generics, item.span, &qualname, item.id); for method in methods { - self.visit_trait_item(method) + let map = &self.tcx.map; + self.process_trait_item(method, make_def_id(item.id, map)) } } @@ -878,7 +925,7 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { } } Def::Local(..) | - Def::Static(_,_) | + Def::Static(..) | Def::Const(..) | Def::AssociatedConst(..) | Def::Struct(..) | @@ -967,15 +1014,23 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { self.visit_pat(&p); for &(id, ref p, immut, _) in &collector.collected_paths { - let mut value = if immut == ast::Mutability::Immutable { - value.to_string() - } else { - "".to_string() + let mut value = match immut { + ast::Mutability::Immutable => value.to_string(), + _ => String::new(), }; let types = self.tcx.node_types(); - let typ = types.get(&id).map(|t| t.to_string()).unwrap_or(String::new()); - value.push_str(": "); - value.push_str(&typ); + let typ = match types.get(&id) { + Some(typ) => { + let typ = typ.to_string(); + if !value.is_empty() { + value.push_str(": "); + } + value.push_str(&typ); + typ + } + None => String::new(), + }; + // Get the span only for the name of the variable (I hope the path // is only ever a variable name, but who knows?). 
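In the macro hunk just below, the deprecated `SipHasher` is replaced by `std::collections::hash_map::DefaultHasher` (stabilised in this release) to derive a hash-based qualname for a macro definition from its callee span. The same construction in isolation, hashing a stand-in `(lo, hi)` pair instead of a real span:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    fn macro_qualname(name: &str, callee_span: (u32, u32)) -> String {
        // Hash whatever identifies the expansion site; the real code hashes
        // the callee span, here a simple (lo, hi) pair stands in for it.
        let mut hasher = DefaultHasher::new();
        callee_span.hash(&mut hasher);
        format!("{}::{}", name, hasher.finish())
    }

    fn main() {
        println!("{}", macro_qualname("println", (10, 42)));
    }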
let sub_span = self.span.span_for_last_ident(p.span); @@ -989,7 +1044,10 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { qualname: format!("{}${}", path_to_string(p), id), value: value, type_value: typ, - scope: 0 + scope: CRATE_NODE_ID, + parent: None, + visibility: Visibility::Inherited, + docs: String::new(), }.lower(self.tcx)); } } @@ -1007,7 +1065,7 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { None => return, Some(data) => data, }; - let mut hasher = SipHasher::new(); + let mut hasher = DefaultHasher::new(); data.callee_span.hash(&mut hasher); let hash = hasher.finish(); let qualname = format!("{}::{}", data.name, hash); @@ -1019,7 +1077,9 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { self.dumper.macro_data(MacroData { span: sub_span, name: data.name.clone(), - qualname: qualname.clone() + qualname: qualname.clone(), + // FIXME where do macro docs come from? + docs: String::new(), }.lower(self.tcx)); } } @@ -1032,11 +1092,66 @@ impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { qualname: qualname, scope: data.scope, callee_span: data.callee_span, - imported: data.imported + imported: data.imported, }.lower(self.tcx)); } } } + + fn process_trait_item(&mut self, trait_item: &ast::TraitItem, trait_id: DefId) { + self.process_macro_use(trait_item.span, trait_item.id); + match trait_item.node { + ast::TraitItemKind::Const(ref ty, Some(ref expr)) => { + self.process_assoc_const(trait_item.id, + trait_item.ident.name, + trait_item.span, + &ty, + &expr, + trait_id, + Visibility::Public, + &trait_item.attrs); + } + ast::TraitItemKind::Method(ref sig, ref body) => { + self.process_method(sig, + body.as_ref().map(|x| &**x), + trait_item.id, + trait_item.ident.name, + Visibility::Public, + &trait_item.attrs, + trait_item.span); + } + ast::TraitItemKind::Const(_, None) | + ast::TraitItemKind::Type(..) | + ast::TraitItemKind::Macro(_) => {} + } + } + + fn process_impl_item(&mut self, impl_item: &ast::ImplItem, impl_id: DefId) { + self.process_macro_use(impl_item.span, impl_item.id); + match impl_item.node { + ast::ImplItemKind::Const(ref ty, ref expr) => { + self.process_assoc_const(impl_item.id, + impl_item.ident.name, + impl_item.span, + &ty, + &expr, + impl_id, + From::from(&impl_item.vis), + &impl_item.attrs); + } + ast::ImplItemKind::Method(ref sig, ref body) => { + self.process_method(sig, + Some(body), + impl_item.id, + impl_item.ident.name, + From::from(&impl_item.vis), + &impl_item.attrs, + impl_item.span); + } + ast::ImplItemKind::Type(_) | + ast::ImplItemKind::Macro(_) => {} + } + } } impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor for DumpVisitor<'l, 'tcx, 'll, D> { @@ -1072,7 +1187,8 @@ impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor for DumpVisitor<'l, 'tcx, 'll, D> id: item.id, mod_id: mod_id, name: ident.to_string(), - scope: self.cur_scope + scope: self.cur_scope, + visibility: From::from(&item.vis), }.lower(self.tcx)); } self.write_sub_paths_truncated(path, true); @@ -1095,25 +1211,19 @@ impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor for DumpVisitor<'l, 'tcx, 'll, D> span: sub_span.expect("No span found for use glob"), id: item.id, names: names, - scope: self.cur_scope + scope: self.cur_scope, + visibility: From::from(&item.vis), }.lower(self.tcx)); } self.write_sub_paths(path, true); } ast::ViewPathList(ref path, ref list) => { for plid in list { - match plid.node { - ast::PathListItemKind::Ident { id, .. 
} => { - let scope = self.cur_scope; - if let Some(def_id) = self.lookup_type_ref(id) { - self.process_def_kind(id, - plid.span, - Some(plid.span), - def_id, - scope); - } - } - ast::PathListItemKind::Mod { .. } => (), + let scope = self.cur_scope; + let id = plid.node.id; + if let Some(def_id) = self.lookup_type_ref(id) { + let span = plid.span; + self.process_def_kind(id, span, Some(span), def_id, scope); } } @@ -1129,7 +1239,7 @@ impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor for DumpVisitor<'l, 'tcx, 'll, D> let alias_span = self.span.span_for_last_ident(item.span); let cnum = match self.sess.cstore.extern_mod_stmt_cnum(item.id) { Some(cnum) => cnum, - None => 0, + None => LOCAL_CRATE, }; if !self.span.filter_generated(alias_span, item.span) { @@ -1143,7 +1253,7 @@ impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor for DumpVisitor<'l, 'tcx, 'll, D> }.lower(self.tcx)); } } - Fn(ref decl, _, _, _, ref ty_params, ref body) => + Fn(ref decl, .., ref ty_params, ref body) => self.process_fn(item, &decl, ty_params, &body), Static(ref typ, _, ref expr) => self.process_static_or_const_item(item, typ, expr), @@ -1151,7 +1261,7 @@ impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor for DumpVisitor<'l, 'tcx, 'll, D> self.process_static_or_const_item(item, &typ, &expr), Struct(ref def, ref ty_params) => self.process_struct(item, def, ty_params), Enum(ref def, ref ty_params) => self.process_enum(item, def, ty_params), - Impl(_, _, + Impl(.., ref ty_params, ref trait_ref, ref typ, @@ -1174,7 +1284,10 @@ impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor for DumpVisitor<'l, 'tcx, 'll, D> name: item.ident.to_string(), id: item.id, qualname: qualname.clone(), - value: value + value: value, + visibility: From::from(&item.vis), + parent: None, + docs: docs_for_attrs(&item.attrs), }.lower(self.tcx)); } @@ -1199,51 +1312,6 @@ impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor for DumpVisitor<'l, 'tcx, 'll, D> } } - fn visit_trait_item(&mut self, trait_item: &ast::TraitItem) { - self.process_macro_use(trait_item.span, trait_item.id); - match trait_item.node { - ast::TraitItemKind::Const(ref ty, Some(ref expr)) => { - self.process_const(trait_item.id, - trait_item.ident.name, - trait_item.span, - &ty, - &expr); - } - ast::TraitItemKind::Method(ref sig, ref body) => { - self.process_method(sig, - body.as_ref().map(|x| &**x), - trait_item.id, - trait_item.ident.name, - trait_item.span); - } - ast::TraitItemKind::Const(_, None) | - ast::TraitItemKind::Type(..) 
| - ast::TraitItemKind::Macro(_) => {} - } - } - - fn visit_impl_item(&mut self, impl_item: &ast::ImplItem) { - self.process_macro_use(impl_item.span, impl_item.id); - match impl_item.node { - ast::ImplItemKind::Const(ref ty, ref expr) => { - self.process_const(impl_item.id, - impl_item.ident.name, - impl_item.span, - &ty, - &expr); - } - ast::ImplItemKind::Method(ref sig, ref body) => { - self.process_method(sig, - Some(body), - impl_item.id, - impl_item.ident.name, - impl_item.span); - } - ast::ImplItemKind::Type(_) | - ast::ImplItemKind::Macro(_) => {} - } - } - fn visit_ty(&mut self, t: &ast::Ty) { self.process_macro_use(t.span, t.id); match t.node { @@ -1286,7 +1354,7 @@ impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor for DumpVisitor<'l, 'tcx, 'll, D> let def = self.tcx.expect_def(hir_expr.id); self.process_struct_lit(ex, path, fields, adt.variant_of_def(def), base) } - ast::ExprKind::MethodCall(_, _, ref args) => self.process_method_call(ex, args), + ast::ExprKind::MethodCall(.., ref args) => self.process_method_call(ex, args), ast::ExprKind::Field(ref sub_ex, _) => { self.visit_expr(&sub_ex); @@ -1310,7 +1378,7 @@ impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor for DumpVisitor<'l, 'tcx, 'll, D> }; let ty = &self.tcx.expr_ty_adjusted(&hir_node).sty; match *ty { - ty::TyStruct(def, _) => { + ty::TyAdt(def, _) => { let sub_span = self.span.sub_span_after_token(ex.span, token::Dot); if !self.span.filter_generated(sub_span, ex.span) { self.dumper.variable_ref(VariableRefData { @@ -1388,12 +1456,17 @@ impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor for DumpVisitor<'l, 'tcx, 'll, D> // process collected paths for &(id, ref p, immut, ref_kind) in &collector.collected_paths { match self.tcx.expect_def(id) { - Def::Local(_, id) => { - let value = if immut == ast::Mutability::Immutable { + Def::Local(def_id) => { + let id = self.tcx.map.as_local_node_id(def_id).unwrap(); + let mut value = if immut == ast::Mutability::Immutable { self.span.snippet(p.span).to_string() } else { "".to_string() }; + let typ = self.tcx.node_types() + .get(&id).map(|t| t.to_string()).unwrap_or(String::new()); + value.push_str(": "); + value.push_str(&typ); assert!(p.segments.len() == 1, "qualified path for local variable def in arm"); @@ -1405,8 +1478,11 @@ impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor for DumpVisitor<'l, 'tcx, 'll, D> name: path_to_string(p), qualname: format!("{}${}", path_to_string(p), id), value: value, - type_value: String::new(), - scope: 0 + type_value: typ, + scope: CRATE_NODE_ID, + parent: None, + visibility: Visibility::Inherited, + docs: String::new(), }.lower(self.tcx)); } } @@ -1415,7 +1491,7 @@ impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor for DumpVisitor<'l, 'tcx, 'll, D> paths_to_process.push((id, p.clone(), Some(ref_kind))) } // FIXME(nrc) what are these doing here? - Def::Static(_, _) | + Def::Static(..) | Def::Const(..) | Def::AssociatedConst(..) => {} def => error!("unexpected definition kind when processing collected paths: {:?}", diff --git a/src/librustc_save_analysis/external_data.rs b/src/librustc_save_analysis/external_data.rs index 65e4f7e869..5847575742 100644 --- a/src/librustc_save_analysis/external_data.rs +++ b/src/librustc_save_analysis/external_data.rs @@ -8,14 +8,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
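The `external_data` module that follows is organised around the `Lower` trait: each NodeId-based record converts itself into a DefId-based form through an associated `Target` type, using the `TyCtxt` to look the ids up. The shape of that pattern, with hypothetical id and context types in place of the compiler's:

    // Hypothetical ids and context, standing in for NodeId, DefId and TyCtxt.
    #[derive(Debug, Clone, Copy)]
    struct NodeId(u32);
    #[derive(Debug, Clone, Copy)]
    struct DefId(u32);

    struct Ctx;
    impl Ctx {
        fn def_id(&self, id: NodeId) -> DefId {
            DefId(id.0 + 1000) // placeholder mapping
        }
    }

    trait Lower {
        type Target;
        fn lower(self, ctx: &Ctx) -> Self::Target;
    }

    struct FnData { id: NodeId, name: String }
    struct ExternalFnData { id: DefId, name: String }

    impl Lower for FnData {
        type Target = ExternalFnData;
        fn lower(self, ctx: &Ctx) -> ExternalFnData {
            ExternalFnData { id: ctx.def_id(self.id), name: self.name }
        }
    }

    fn main() {
        let lowered = FnData { id: NodeId(7), name: "foo".to_string() }.lower(&Ctx);
        println!("{:?} {}", lowered.id, lowered.name);
    }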
-use rustc::hir::def_id::{DefId, DefIndex}; +use rustc::hir::def_id::{CrateNum, DefId, DefIndex}; use rustc::hir::map::Map; use rustc::ty::TyCtxt; -use syntax::ast::{CrateNum, NodeId}; +use syntax::ast::NodeId; use syntax::codemap::CodeMap; use syntax_pos::Span; -use data; +use data::{self, Visibility}; // FIXME: this should be pub(crate), but the current snapshot doesn't allow it yet pub trait Lower { @@ -23,12 +23,15 @@ pub trait Lower { fn lower(self, tcx: TyCtxt) -> Self::Target; } -fn make_def_id(id: NodeId, map: &Map) -> DefId { +pub fn make_def_id(id: NodeId, map: &Map) -> DefId { map.opt_local_def_id(id).unwrap_or(null_def_id()) } pub fn null_def_id() -> DefId { - DefId { krate: u32::max_value(), index: DefIndex::from_u32(u32::max_value()) } + DefId { + krate: CrateNum::from_u32(u32::max_value()), + index: DefIndex::from_u32(u32::max_value()) + } } #[derive(Clone, Debug, RustcEncodable)] @@ -91,7 +94,9 @@ pub struct EnumData { pub qualname: String, pub span: SpanData, pub scope: DefId, - pub variants: Vec + pub variants: Vec, + pub visibility: Visibility, + pub docs: String, } impl Lower for data::EnumData { @@ -106,6 +111,8 @@ impl Lower for data::EnumData { span: SpanData::from_span(self.span, tcx.sess.codemap()), scope: make_def_id(self.scope, &tcx.map), variants: self.variants.into_iter().map(|id| make_def_id(id, &tcx.map)).collect(), + visibility: self.visibility, + docs: self.docs, } } } @@ -166,6 +173,9 @@ pub struct FunctionData { pub span: SpanData, pub scope: DefId, pub value: String, + pub visibility: Visibility, + pub parent: Option, + pub docs: String, } impl Lower for data::FunctionData { @@ -180,6 +190,9 @@ impl Lower for data::FunctionData { span: SpanData::from_span(self.span, tcx.sess.codemap()), scope: make_def_id(self.scope, &tcx.map), value: self.value, + visibility: self.visibility, + parent: self.parent, + docs: self.docs, } } } @@ -251,6 +264,7 @@ pub struct MacroData { pub span: SpanData, pub name: String, pub qualname: String, + pub docs: String, } impl Lower for data::MacroData { @@ -261,6 +275,7 @@ impl Lower for data::MacroData { span: SpanData::from_span(self.span, tcx.sess.codemap()), name: self.name, qualname: self.qualname, + docs: self.docs, } } } @@ -323,6 +338,9 @@ pub struct MethodData { pub scope: DefId, pub value: String, pub decl_id: Option, + pub visibility: Visibility, + pub parent: Option, + pub docs: String, } impl Lower for data::MethodData { @@ -337,6 +355,9 @@ impl Lower for data::MethodData { qualname: self.qualname, value: self.value, decl_id: self.decl_id, + visibility: self.visibility, + parent: self.parent, + docs: self.docs, } } } @@ -351,6 +372,8 @@ pub struct ModData { pub scope: DefId, pub filename: String, pub items: Vec, + pub visibility: Visibility, + pub docs: String, } impl Lower for data::ModData { @@ -365,6 +388,8 @@ impl Lower for data::ModData { scope: make_def_id(self.scope, &tcx.map), filename: self.filename, items: self.items.into_iter().map(|id| make_def_id(id, &tcx.map)).collect(), + visibility: self.visibility, + docs: self.docs, } } } @@ -401,6 +426,8 @@ pub struct StructData { pub scope: DefId, pub value: String, pub fields: Vec, + pub visibility: Visibility, + pub docs: String, } impl Lower for data::StructData { @@ -416,6 +443,8 @@ impl Lower for data::StructData { scope: make_def_id(self.scope, &tcx.map), value: self.value, fields: self.fields.into_iter().map(|id| make_def_id(id, &tcx.map)).collect(), + visibility: self.visibility, + docs: self.docs, } } } @@ -428,7 +457,9 @@ pub struct StructVariantData { 
pub qualname: String, pub type_value: String, pub value: String, - pub scope: DefId + pub scope: DefId, + pub parent: Option, + pub docs: String, } impl Lower for data::StructVariantData { @@ -443,6 +474,8 @@ impl Lower for data::StructVariantData { type_value: self.type_value, value: self.value, scope: make_def_id(self.scope, &tcx.map), + parent: self.parent, + docs: self.docs, } } } @@ -456,6 +489,8 @@ pub struct TraitData { pub scope: DefId, pub value: String, pub items: Vec, + pub visibility: Visibility, + pub docs: String, } impl Lower for data::TraitData { @@ -470,6 +505,8 @@ impl Lower for data::TraitData { scope: make_def_id(self.scope, &tcx.map), value: self.value, items: self.items.into_iter().map(|id| make_def_id(id, &tcx.map)).collect(), + visibility: self.visibility, + docs: self.docs, } } } @@ -483,6 +520,8 @@ pub struct TupleVariantData { pub type_value: String, pub value: String, pub scope: DefId, + pub parent: Option, + pub docs: String, } impl Lower for data::TupleVariantData { @@ -497,6 +536,8 @@ impl Lower for data::TupleVariantData { type_value: self.type_value, value: self.value, scope: make_def_id(self.scope, &tcx.map), + parent: self.parent, + docs: self.docs, } } } @@ -509,6 +550,9 @@ pub struct TypeDefData { pub span: SpanData, pub qualname: String, pub value: String, + pub visibility: Visibility, + pub parent: Option, + pub docs: String, } impl Lower for data::TypeDefData { @@ -521,6 +565,9 @@ impl Lower for data::TypeDefData { span: SpanData::from_span(self.span, tcx.sess.codemap()), qualname: self.qualname, value: self.value, + visibility: self.visibility, + parent: self.parent, + docs: self.docs, } } } @@ -553,7 +600,8 @@ pub struct UseData { pub span: SpanData, pub name: String, pub mod_id: Option, - pub scope: DefId + pub scope: DefId, + pub visibility: Visibility, } impl Lower for data::UseData { @@ -566,6 +614,7 @@ impl Lower for data::UseData { name: self.name, mod_id: self.mod_id, scope: make_def_id(self.scope, &tcx.map), + visibility: self.visibility, } } } @@ -575,7 +624,8 @@ pub struct UseGlobData { pub id: DefId, pub span: SpanData, pub names: Vec, - pub scope: DefId + pub scope: DefId, + pub visibility: Visibility, } impl Lower for data::UseGlobData { @@ -587,6 +637,7 @@ impl Lower for data::UseGlobData { span: SpanData::from_span(self.span, tcx.sess.codemap()), names: self.names, scope: make_def_id(self.scope, &tcx.map), + visibility: self.visibility, } } } @@ -602,6 +653,9 @@ pub struct VariableData { pub scope: DefId, pub value: String, pub type_value: String, + pub parent: Option, + pub visibility: Visibility, + pub docs: String, } impl Lower for data::VariableData { @@ -617,6 +671,9 @@ impl Lower for data::VariableData { scope: make_def_id(self.scope, &tcx.map), value: self.value, type_value: self.type_value, + parent: self.parent, + visibility: self.visibility, + docs: self.docs, } } } diff --git a/src/librustc_save_analysis/json_api_dumper.rs b/src/librustc_save_analysis/json_api_dumper.rs new file mode 100644 index 0000000000..d56aae18a7 --- /dev/null +++ b/src/librustc_save_analysis/json_api_dumper.rs @@ -0,0 +1,415 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
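The new `JsonApiDumper` defined below (like the existing `JsonDumper`) accumulates everything into an `Analysis` value and serialises it only when the dumper is dropped, so nothing partial is ever written. A reduced sketch of that collect-then-dump-on-`Drop` shape, writing a `Debug` dump to any `io::Write` sink instead of the real `rustc_serialize` JSON:

    use std::io::Write;

    #[derive(Debug, Default)]
    struct Analysis {
        defs: Vec<String>,
    }

    struct Dumper<'a, W: Write + 'a> {
        output: &'a mut W,
        result: Analysis,
    }

    impl<'a, W: Write> Dumper<'a, W> {
        fn new(output: &'a mut W) -> Dumper<'a, W> {
            Dumper { output: output, result: Analysis::default() }
        }
        fn def(&mut self, name: &str) {
            self.result.defs.push(name.to_string());
        }
    }

    impl<'a, W: Write> Drop for Dumper<'a, W> {
        fn drop(&mut self) {
            // Everything is written in one go once the dumper goes out of scope.
            if write!(self.output, "{:?}", self.result).is_err() {
                eprintln!("Error writing output");
            }
        }
    }

    fn main() {
        let mut buf = Vec::new();
        {
            let mut dumper = Dumper::new(&mut buf);
            dumper.def("foo");
            dumper.def("bar");
        } // drop happens here, flushing the collected analysis
        println!("{}", String::from_utf8(buf).unwrap());
    }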
+ +use std::io::Write; + +use rustc::hir::def_id::DefId; +use rustc_serialize::json::as_json; + +use external_data::*; +use data::{VariableKind, Visibility}; +use dump::Dump; +use super::Format; + + +// A dumper to dump a restricted set of JSON information, designed for use with +// libraries distributed without their source. Clients are likely to use type +// information here, and (for example) generate Rustdoc URLs, but don't need +// information for navigating the source of the crate. +// Relative to the regular JSON save-analysis info, this form is filtered to +// remove non-visible items, but includes some extra info for items (e.g., the +// parent field for finding the struct to which a field belongs). +pub struct JsonApiDumper<'b, W: Write + 'b> { + output: &'b mut W, + result: Analysis, +} + +impl<'b, W: Write> JsonApiDumper<'b, W> { + pub fn new(writer: &'b mut W) -> JsonApiDumper<'b, W> { + JsonApiDumper { output: writer, result: Analysis::new() } + } +} + +impl<'b, W: Write> Drop for JsonApiDumper<'b, W> { + fn drop(&mut self) { + if let Err(_) = write!(self.output, "{}", as_json(&self.result)) { + error!("Error writing output"); + } + } +} + +macro_rules! impl_fn { + ($fn_name: ident, $data_type: ident, $bucket: ident) => { + fn $fn_name(&mut self, data: $data_type) { + if let Some(datum) = From::from(data) { + self.result.$bucket.push(datum); + } + } + } +} + +impl<'b, W: Write + 'b> Dump for JsonApiDumper<'b, W> { + fn crate_prelude(&mut self, data: CratePreludeData) { + self.result.prelude = Some(data) + } + + impl_fn!(use_data, UseData, imports); + impl_fn!(use_glob, UseGlobData, imports); + + impl_fn!(enum_data, EnumData, defs); + impl_fn!(tuple_variant, TupleVariantData, defs); + impl_fn!(struct_variant, StructVariantData, defs); + impl_fn!(struct_data, StructData, defs); + impl_fn!(trait_data, TraitData, defs); + impl_fn!(function, FunctionData, defs); + impl_fn!(method, MethodData, defs); + impl_fn!(macro_data, MacroData, defs); + impl_fn!(mod_data, ModData, defs); + impl_fn!(typedef, TypeDefData, defs); + impl_fn!(variable, VariableData, defs); +} + +// FIXME methods. The defs have information about possible overriding and the +// refs have decl information (e.g., a trait method where we know the required +// method, but not the supplied method). In both cases, we are currently +// ignoring it. + +#[derive(Debug, RustcEncodable)] +struct Analysis { + kind: Format, + prelude: Option, + imports: Vec, + defs: Vec, + // These two fields are dummies so that clients can parse the two kinds of + // JSON data in the same way. + refs: Vec<()>, + macro_refs: Vec<()>, +} + +impl Analysis { + fn new() -> Analysis { + Analysis { + kind: Format::JsonApi, + prelude: None, + imports: vec![], + defs: vec![], + refs: vec![], + macro_refs: vec![], + } + } +} + +// DefId::index is a newtype and so the JSON serialisation is ugly. Therefore +// we use our own Id which is the same, but without the newtype. 
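The `impl_fn!` macro above stamps out one `Dump` method per record type: each record is converted through `From` into an `Option`, and only `Some` results (public items, for this dumper) are pushed into the corresponding bucket. A toy version of that macro-generated plumbing, with invented record types and a single string bucket:

    // Hypothetical records; only "public" ones survive the conversion.
    struct FnRecord { name: String, public: bool }
    struct ModRecord { name: String, public: bool }

    struct Output { defs: Vec<String> }

    impl From<FnRecord> for Option<String> {
        fn from(r: FnRecord) -> Option<String> {
            if r.public { Some(format!("fn {}", r.name)) } else { None }
        }
    }
    impl From<ModRecord> for Option<String> {
        fn from(r: ModRecord) -> Option<String> {
            if r.public { Some(format!("mod {}", r.name)) } else { None }
        }
    }

    macro_rules! impl_fn {
        ($fn_name: ident, $data_type: ident, $bucket: ident) => {
            impl Output {
                fn $fn_name(&mut self, data: $data_type) {
                    // Skip records the `From` conversion filtered out.
                    if let Some(datum) = From::from(data) {
                        self.$bucket.push(datum);
                    }
                }
            }
        }
    }

    impl_fn!(function, FnRecord, defs);
    impl_fn!(mod_data, ModRecord, defs);

    fn main() {
        let mut out = Output { defs: Vec::new() };
        out.function(FnRecord { name: "foo".to_string(), public: true });
        out.mod_data(ModRecord { name: "hidden".to_string(), public: false });
        println!("{:?}", out.defs); // only the public item is kept
    }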
+#[derive(Debug, RustcEncodable)] +struct Id { + krate: u32, + index: u32, +} + +impl From for Id { + fn from(id: DefId) -> Id { + Id { + krate: id.krate.as_u32(), + index: id.index.as_u32(), + } + } +} + +#[derive(Debug, RustcEncodable)] +struct Import { + kind: ImportKind, + id: Id, + span: SpanData, + name: String, + value: String, +} + +#[derive(Debug, RustcEncodable)] +enum ImportKind { + Use, + GlobUse, +} + +impl From for Option { + fn from(data: UseData) -> Option { + match data.visibility { + Visibility::Public => Some(Import { + kind: ImportKind::Use, + id: From::from(data.id), + span: data.span, + name: data.name, + value: String::new(), + }), + _ => None, + } + } +} +impl From for Option { + fn from(data: UseGlobData) -> Option { + match data.visibility { + Visibility::Public => Some(Import { + kind: ImportKind::GlobUse, + id: From::from(data.id), + span: data.span, + name: "*".to_owned(), + value: data.names.join(", "), + }), + _ => None, + } + } +} + +#[derive(Debug, RustcEncodable)] +struct Def { + kind: DefKind, + id: Id, + span: SpanData, + name: String, + qualname: String, + value: String, + parent: Option, + children: Vec, + decl_id: Option, + docs: String, +} + +#[derive(Debug, RustcEncodable)] +enum DefKind { + // value = variant names + Enum, + // value = enum name + variant name + types + Tuple, + // value = [enum name +] name + fields + Struct, + // value = signature + Trait, + // value = type + generics + Function, + // value = type + generics + Method, + // No id, no value. + Macro, + // value = file_name + Mod, + // value = aliased type + Type, + // value = type and init expression (for all variable kinds). + Static, + Const, + Field, +} + +impl From for Option { + fn from(data: EnumData) -> Option { + match data.visibility { + Visibility::Public => Some(Def { + kind: DefKind::Enum, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + parent: None, + children: data.variants.into_iter().map(|id| From::from(id)).collect(), + decl_id: None, + docs: data.docs, + }), + _ => None, + } + } +} + +impl From for Option { + fn from(data: TupleVariantData) -> Option { + Some(Def { + kind: DefKind::Tuple, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + parent: data.parent.map(|id| From::from(id)), + children: vec![], + decl_id: None, + docs: data.docs, + }) + } +} +impl From for Option { + fn from(data: StructVariantData) -> Option { + Some(Def { + kind: DefKind::Struct, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + parent: data.parent.map(|id| From::from(id)), + children: vec![], + decl_id: None, + docs: data.docs, + }) + } +} +impl From for Option { + fn from(data: StructData) -> Option { + match data.visibility { + Visibility::Public => Some(Def { + kind: DefKind::Struct, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + parent: None, + children: data.fields.into_iter().map(|id| From::from(id)).collect(), + decl_id: None, + docs: data.docs, + }), + _ => None, + } + } +} +impl From for Option { + fn from(data: TraitData) -> Option { + match data.visibility { + Visibility::Public => Some(Def { + kind: DefKind::Trait, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: data.items.into_iter().map(|id| From::from(id)).collect(), + parent: None, + 
decl_id: None, + docs: data.docs, + }), + _ => None, + } + } +} +impl From for Option { + fn from(data: FunctionData) -> Option { + match data.visibility { + Visibility::Public => Some(Def { + kind: DefKind::Function, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: vec![], + parent: data.parent.map(|id| From::from(id)), + decl_id: None, + docs: data.docs, + }), + _ => None, + } + } +} +impl From for Option { + fn from(data: MethodData) -> Option { + match data.visibility { + Visibility::Public => Some(Def { + kind: DefKind::Method, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: vec![], + parent: data.parent.map(|id| From::from(id)), + decl_id: data.decl_id.map(|id| From::from(id)), + docs: data.docs, + }), + _ => None, + } + } +} +impl From for Option { + fn from(data: MacroData) -> Option { + Some(Def { + kind: DefKind::Macro, + id: From::from(null_def_id()), + span: data.span, + name: data.name, + qualname: data.qualname, + value: String::new(), + children: vec![], + parent: None, + decl_id: None, + docs: data.docs, + }) + } +} +impl From for Option { + fn from(data:ModData) -> Option { + match data.visibility { + Visibility::Public => Some(Def { + kind: DefKind::Mod, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.filename, + children: data.items.into_iter().map(|id| From::from(id)).collect(), + parent: None, + decl_id: None, + docs: data.docs, + }), + _ => None, + } + } +} +impl From for Option { + fn from(data: TypeDefData) -> Option { + match data.visibility { + Visibility::Public => Some(Def { + kind: DefKind::Type, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: vec![], + parent: data.parent.map(|id| From::from(id)), + decl_id: None, + docs: String::new(), + }), + _ => None, + } + } +} +impl From for Option { + fn from(data: VariableData) -> Option { + match data.visibility { + Visibility::Public => Some(Def { + kind: match data.kind { + VariableKind::Static => DefKind::Static, + VariableKind::Const => DefKind::Const, + VariableKind::Local => { return None } + VariableKind::Field => DefKind::Field, + }, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: vec![], + parent: data.parent.map(|id| From::from(id)), + decl_id: None, + docs: data.docs, + }), + _ => None, + } + } +} diff --git a/src/librustc_save_analysis/json_dumper.rs b/src/librustc_save_analysis/json_dumper.rs index b1955cbd7b..0378d75cc6 100644 --- a/src/librustc_save_analysis/json_dumper.rs +++ b/src/librustc_save_analysis/json_dumper.rs @@ -16,6 +16,7 @@ use rustc_serialize::json::as_json; use external_data::*; use data::VariableKind; use dump::Dump; +use super::Format; pub struct JsonDumper<'b, W: Write + 'b> { output: &'b mut W, @@ -87,6 +88,7 @@ impl<'b, W: Write + 'b> Dump for JsonDumper<'b, W> { #[derive(Debug, RustcEncodable)] struct Analysis { + kind: Format, prelude: Option, imports: Vec, defs: Vec, @@ -97,6 +99,7 @@ struct Analysis { impl Analysis { fn new() -> Analysis { Analysis { + kind: Format::Json, prelude: None, imports: vec![], defs: vec![], @@ -117,7 +120,7 @@ struct Id { impl From for Id { fn from(id: DefId) -> Id { Id { - krate: id.krate, + krate: id.krate.as_u32(), index: id.index.as_u32(), } } @@ -183,6 +186,7 @@ struct Def { value: String, 
children: Vec, decl_id: Option, + docs: String, } #[derive(Debug, RustcEncodable)] @@ -223,6 +227,7 @@ impl From for Def { value: data.value, children: data.variants.into_iter().map(|id| From::from(id)).collect(), decl_id: None, + docs: data.docs, } } } @@ -238,6 +243,7 @@ impl From for Def { value: data.value, children: vec![], decl_id: None, + docs: data.docs, } } } @@ -252,6 +258,7 @@ impl From for Def { value: data.value, children: vec![], decl_id: None, + docs: data.docs, } } } @@ -266,6 +273,7 @@ impl From for Def { value: data.value, children: data.fields.into_iter().map(|id| From::from(id)).collect(), decl_id: None, + docs: data.docs, } } } @@ -280,6 +288,7 @@ impl From for Def { value: data.value, children: data.items.into_iter().map(|id| From::from(id)).collect(), decl_id: None, + docs: data.docs, } } } @@ -294,6 +303,7 @@ impl From for Def { value: data.value, children: vec![], decl_id: None, + docs: data.docs, } } } @@ -308,6 +318,7 @@ impl From for Def { value: data.value, children: vec![], decl_id: data.decl_id.map(|id| From::from(id)), + docs: data.docs, } } } @@ -322,6 +333,7 @@ impl From for Def { value: String::new(), children: vec![], decl_id: None, + docs: data.docs, } } } @@ -336,6 +348,7 @@ impl From for Def { value: data.filename, children: data.items.into_iter().map(|id| From::from(id)).collect(), decl_id: None, + docs: data.docs, } } } @@ -350,6 +363,7 @@ impl From for Def { value: data.value, children: vec![], decl_id: None, + docs: String::new(), } } } @@ -366,9 +380,10 @@ impl From for Def { span: data.span, name: data.name, qualname: data.qualname, - value: data.value, + value: data.type_value, children: vec![], decl_id: None, + docs: data.docs, } } } diff --git a/src/librustc_save_analysis/lib.rs b/src/librustc_save_analysis/lib.rs index 16cd9186ce..aa68a87312 100644 --- a/src/librustc_save_analysis/lib.rs +++ b/src/librustc_save_analysis/lib.rs @@ -18,6 +18,7 @@ #![cfg_attr(not(stage0), deny(warnings))] #![feature(custom_attribute)] +#![feature(dotdot_in_tuple_patterns)] #![allow(unused_attributes)] #![feature(rustc_private)] #![feature(staged_api)] @@ -29,7 +30,9 @@ extern crate serialize as rustc_serialize; extern crate syntax_pos; + mod csv_dumper; +mod json_api_dumper; mod json_dumper; mod data; mod dump; @@ -49,16 +52,19 @@ use std::env; use std::fs::{self, File}; use std::path::{Path, PathBuf}; -use syntax::ast::{self, NodeId, PatKind}; -use syntax::parse::token::{self, keywords}; +use syntax::ast::{self, NodeId, PatKind, Attribute, CRATE_NODE_ID}; +use syntax::parse::lexer::comments::strip_doc_comment_decoration; +use syntax::parse::token::{self, keywords, InternedString}; use syntax::visit::{self, Visitor}; use syntax::print::pprust::{ty_to_string, arg_to_string}; use syntax::codemap::MacroAttribute; use syntax_pos::*; pub use self::csv_dumper::CsvDumper; +pub use self::json_api_dumper::JsonApiDumper; pub use self::json_dumper::JsonDumper; pub use self::data::*; +pub use self::external_data::make_def_id; pub use self::dump::Dump; pub use self::dump_visitor::DumpVisitor; use self::span_utils::SpanUtils; @@ -114,7 +120,7 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { }; result.push(CrateData { name: (&self.tcx.sess.cstore.crate_name(n)[..]).to_owned(), - number: n, + number: n.as_u32(), span: span, }); } @@ -124,7 +130,7 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { pub fn get_item_data(&self, item: &ast::Item) -> Option { match item.node { - ast::ItemKind::Fn(ref decl, _, _, _, ref generics, _) => { + ast::ItemKind::Fn(ref decl, .., ref generics, _) => 
{ let qualname = format!("::{}", self.tcx.node_path_str(item.id)); let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Fn); filter!(self.span_utils, sub_span, item.span, None); @@ -138,6 +144,9 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { span: sub_span.unwrap(), scope: self.enclosing_scope(item.id), value: make_signature(decl, generics), + visibility: From::from(&item.vis), + parent: None, + docs: docs_for_attrs(&item.attrs), })) } ast::ItemKind::Static(ref typ, mt, ref expr) => { @@ -160,8 +169,11 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { qualname: qualname, span: sub_span.unwrap(), scope: self.enclosing_scope(item.id), + parent: None, value: value, type_value: ty_to_string(&typ), + visibility: From::from(&item.vis), + docs: docs_for_attrs(&item.attrs), })) } ast::ItemKind::Const(ref typ, ref expr) => { @@ -175,8 +187,11 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { qualname: qualname, span: sub_span.unwrap(), scope: self.enclosing_scope(item.id), + parent: None, value: self.span_utils.snippet(expr.span), type_value: ty_to_string(&typ), + visibility: From::from(&item.vis), + docs: docs_for_attrs(&item.attrs), })) } ast::ItemKind::Mod(ref m) => { @@ -195,6 +210,8 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { scope: self.enclosing_scope(item.id), filename: filename, items: m.items.iter().map(|i| i.id).collect(), + visibility: From::from(&item.vis), + docs: docs_for_attrs(&item.attrs), })) } ast::ItemKind::Enum(ref def, _) => { @@ -215,9 +232,11 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { qualname: qualname, scope: self.enclosing_scope(item.id), variants: def.variants.iter().map(|v| v.node.data.id()).collect(), + visibility: From::from(&item.vis), + docs: docs_for_attrs(&item.attrs), })) } - ast::ItemKind::Impl(_, _, _, ref trait_ref, ref typ, _) => { + ast::ItemKind::Impl(.., ref trait_ref, ref typ, _) => { let mut type_data = None; let sub_span; @@ -277,8 +296,11 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { qualname: qualname, span: sub_span.unwrap(), scope: scope, + parent: Some(make_def_id(scope, &self.tcx.map)), value: "".to_owned(), type_value: typ, + visibility: From::from(&field.vis), + docs: docs_for_attrs(&field.attrs), }) } else { None @@ -291,21 +313,22 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { name: ast::Name, span: Span) -> Option { // The qualname for a method is the trait name or name of the struct in an impl in // which the method is declared in, followed by the method's name. 
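The many `(ref decl, .., ref generics, _)` and `ItemKind::Impl(.., ref trait_ref, ref typ, _)` rewrites in this file use the `..` rest pattern in tuple and tuple-struct patterns, gated at the time by the `dotdot_in_tuple_patterns` feature enabled at the top of lib.rs. A minimal, self-contained sketch of the shorthand on an invented enum (nothing below is a compiler type):

enum Item {
    // Invented stand-in with the same "many positional fields" shape as the AST variants.
    Fn(String, bool, u8, u32, Vec<String>, i64),
}

fn name_and_generics(item: &Item) -> (&str, &[String]) {
    match *item {
        // `..` skips any number of middle fields, like `(ref decl, .., ref generics, _)` above.
        Item::Fn(ref name, .., ref generics, _) => (name.as_str(), generics.as_slice()),
    }
}

fn main() {
    let item = Item::Fn("foo".into(), false, 0, 0, vec!["T".into()], 7);
    let (name, generics) = name_and_generics(&item);
    println!("fn {}<{}>", name, generics.join(", "));
}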
- let qualname = match self.tcx.impl_of_method(self.tcx.map.local_def_id(id)) { + let (qualname, parent_scope, vis, docs) = + match self.tcx.impl_of_method(self.tcx.map.local_def_id(id)) { Some(impl_id) => match self.tcx.map.get_if_local(impl_id) { Some(NodeItem(item)) => { match item.node { - hir::ItemImpl(_, _, _, _, ref ty, _) => { + hir::ItemImpl(.., ref ty, _) => { let mut result = String::from("<"); result.push_str(&rustc::hir::print::ty_to_string(&ty)); - if let Some(def_id) = self.tcx - .trait_of_item(self.tcx.map.local_def_id(id)) { + let trait_id = self.tcx.trait_id_of_impl(impl_id); + if let Some(def_id) = trait_id { result.push_str(" as "); result.push_str(&self.tcx.item_path_str(def_id)); } result.push_str(">"); - result + (result, trait_id, From::from(&item.vis), docs_for_attrs(&item.attrs)) } _ => { span_bug!(span, @@ -326,8 +349,11 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { None => match self.tcx.trait_of_item(self.tcx.map.local_def_id(id)) { Some(def_id) => { match self.tcx.map.get_if_local(def_id) { - Some(NodeItem(_)) => { - format!("::{}", self.tcx.item_path_str(def_id)) + Some(NodeItem(item)) => { + (format!("::{}", self.tcx.item_path_str(def_id)), + Some(def_id), + From::from(&item.vis), + docs_for_attrs(&item.attrs)) } r => { span_bug!(span, @@ -348,8 +374,7 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { let qualname = format!("{}::{}", qualname, name); let def_id = self.tcx.map.local_def_id(id); - let decl_id = self.tcx.trait_item_of_item(def_id).and_then(|new_id| { - let new_def_id = new_id.def_id(); + let decl_id = self.tcx.trait_item_of_item(def_id).and_then(|new_def_id| { if new_def_id != def_id { Some(new_def_id) } else { @@ -368,6 +393,9 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { scope: self.enclosing_scope(id), // FIXME you get better data here by using the visitor. value: String::new(), + visibility: vis, + parent: parent_scope, + docs: docs, }) } @@ -405,7 +433,7 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { } }; match self.tcx.expr_ty_adjusted(&hir_node).sty { - ty::TyStruct(def, _) => { + ty::TyAdt(def, _) if !def.is_enum() => { let f = def.struct_variant().field_named(ident.node.name); let sub_span = self.span_utils.span_for_last_ident(expr.span); filter!(self.span_utils, sub_span, expr.span, None); @@ -417,14 +445,14 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { })); } _ => { - debug!("Expected struct type, found {:?}", ty); + debug!("Expected struct or union type, found {:?}", ty); None } } } - ast::ExprKind::Struct(ref path, _, _) => { + ast::ExprKind::Struct(ref path, ..) => { match self.tcx.expr_ty_adjusted(&hir_node).sty { - ty::TyStruct(def, _) => { + ty::TyAdt(def, _) if !def.is_enum() => { let sub_span = self.span_utils.span_for_last_ident(path.span); filter!(self.span_utils, sub_span, path.span, None); Some(Data::TypeRefData(TypeRefData { @@ -435,9 +463,9 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { })) } _ => { - // FIXME ty could legitimately be a TyEnum, but then we will fail + // FIXME ty could legitimately be an enum, but then we will fail // later if we try to look up the fields. 
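The qualname assembled above is the pretty-printed self type, optionally followed by ` as ` plus the trait path, wrapped in angle brackets; the method name is appended afterwards as `::name`. A rough, hand-written sketch of the resulting shape, with invented names rather than captured compiler output:

fn qualname(self_ty: &str, trait_path: Option<&str>, method: &str) -> String {
    // Mirrors the string building above: "<Type as Trait>::method" or "<Type>::method".
    let mut result = String::from("<");
    result.push_str(self_ty);
    if let Some(tr) = trait_path {
        result.push_str(" as ");
        result.push_str(tr);
    }
    result.push_str(">");
    format!("{}::{}", result, method)
}

fn main() {
    assert_eq!(qualname("MyStruct", None, "len"), "<MyStruct>::len");
    assert_eq!(qualname("MyStruct", Some("std::fmt::Display"), "fmt"),
               "<MyStruct as std::fmt::Display>::fmt");
}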
- debug!("expected TyStruct, found {:?}", ty); + debug!("expected struct or union, found {:?}", ty); None } } @@ -488,10 +516,11 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { })) } Def::Struct(def_id) | + Def::Union(def_id) | Def::Enum(def_id) | Def::TyAlias(def_id) | Def::Trait(def_id) | - Def::TyParam(_, _, def_id, _) => { + Def::TyParam(def_id) => { Some(Data::TypeRefData(TypeRefData { span: sub_span.unwrap(), ref_id: Some(def_id), @@ -513,16 +542,9 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { .map(|mr| mr.def_id()) } ty::ImplContainer(def_id) => { - let impl_items = self.tcx.impl_items.borrow(); - Some(impl_items.get(&def_id) - .unwrap() - .iter() - .find(|mr| { - self.tcx.impl_or_trait_item(mr.def_id()).name() == - ti.name() - }) - .unwrap() - .def_id()) + Some(*self.tcx.impl_or_trait_items(def_id).iter().find(|&&mr| { + self.tcx.impl_or_trait_item(mr).name() == ti.name() + }).unwrap()) } } } else { @@ -646,12 +668,12 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { #[inline] pub fn enclosing_scope(&self, id: NodeId) -> NodeId { - self.tcx.map.get_enclosing_scope(id).unwrap_or(0) + self.tcx.map.get_enclosing_scope(id).unwrap_or(CRATE_NODE_ID) } } fn make_signature(decl: &ast::FnDecl, generics: &ast::Generics) -> String { - let mut sig = String::new(); + let mut sig = "fn ".to_owned(); if !generics.lifetimes.is_empty() || !generics.ty_params.is_empty() { sig.push('<'); sig.push_str(&generics.lifetimes.iter() @@ -671,7 +693,7 @@ fn make_signature(decl: &ast::FnDecl, generics: &ast::Generics) -> String { sig.push_str(&decl.inputs.iter().map(arg_to_string).collect::>().join(", ")); sig.push(')'); match decl.output { - ast::FunctionRetTy::Default(_) => {} + ast::FunctionRetTy::Default(_) => sig.push_str(" -> ()"), ast::FunctionRetTy::Ty(ref t) => sig.push_str(&format!(" -> {}", ty_to_string(t))), } @@ -693,11 +715,11 @@ impl PathCollector { impl Visitor for PathCollector { fn visit_pat(&mut self, p: &ast::Pat) { match p.node { - PatKind::Struct(ref path, _, _) => { + PatKind::Struct(ref path, ..) => { self.collected_paths.push((p.id, path.clone(), ast::Mutability::Mutable, recorder::TypeRef)); } - PatKind::TupleStruct(ref path, _, _) | + PatKind::TupleStruct(ref path, ..) | PatKind::Path(_, ref path) => { self.collected_paths.push((p.id, path.clone(), ast::Mutability::Mutable, recorder::VarRef)); @@ -724,17 +746,34 @@ impl Visitor for PathCollector { } } -#[derive(Clone, Copy, Debug)] +fn docs_for_attrs(attrs: &[Attribute]) -> String { + let doc = InternedString::new("doc"); + let mut result = String::new(); + + for attr in attrs { + if attr.name() == doc { + if let Some(ref val) = attr.value_str() { + result.push_str(&strip_doc_comment_decoration(val)); + result.push('\n'); + } + } + } + + result +} + +#[derive(Clone, Copy, Debug, RustcEncodable)] pub enum Format { Csv, Json, + JsonApi, } impl Format { fn extension(&self) -> &'static str { match *self { Format::Csv => ".csv", - Format::Json => ".json", + Format::Json | Format::JsonApi => ".json", } } } @@ -804,6 +843,7 @@ pub fn process_crate<'l, 'tcx>(tcx: TyCtxt<'l, 'tcx, 'tcx>, match format { Format::Csv => dump!(CsvDumper::new(output)), Format::Json => dump!(JsonDumper::new(output)), + Format::JsonApi => dump!(JsonApiDumper::new(output)), } } diff --git a/src/librustc_trans/_match.rs b/src/librustc_trans/_match.rs deleted file mode 100644 index 27a8c1f1df..0000000000 --- a/src/librustc_trans/_match.rs +++ /dev/null @@ -1,2012 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! # Compilation of match statements -//! -//! I will endeavor to explain the code as best I can. I have only a loose -//! understanding of some parts of it. -//! -//! ## Matching -//! -//! The basic state of the code is maintained in an array `m` of `Match` -//! objects. Each `Match` describes some list of patterns, all of which must -//! match against the current list of values. If those patterns match, then -//! the arm listed in the match is the correct arm. A given arm may have -//! multiple corresponding match entries, one for each alternative that -//! remains. As we proceed these sets of matches are adjusted by the various -//! `enter_XXX()` functions, each of which adjusts the set of options given -//! some information about the value which has been matched. -//! -//! So, initially, there is one value and N matches, each of which have one -//! constituent pattern. N here is usually the number of arms but may be -//! greater, if some arms have multiple alternatives. For example, here: -//! -//! enum Foo { A, B(int), C(usize, usize) } -//! match foo { -//! A => ..., -//! B(x) => ..., -//! C(1, 2) => ..., -//! C(_) => ... -//! } -//! -//! The value would be `foo`. There would be four matches, each of which -//! contains one pattern (and, in one case, a guard). We could collect the -//! various options and then compile the code for the case where `foo` is an -//! `A`, a `B`, and a `C`. When we generate the code for `C`, we would (1) -//! drop the two matches that do not match a `C` and (2) expand the other two -//! into two patterns each. In the first case, the two patterns would be `1` -//! and `2`, and the in the second case the _ pattern would be expanded into -//! `_` and `_`. The two values are of course the arguments to `C`. -//! -//! Here is a quick guide to the various functions: -//! -//! - `compile_submatch()`: The main workhouse. It takes a list of values and -//! a list of matches and finds the various possibilities that could occur. -//! -//! - `enter_XXX()`: modifies the list of matches based on some information -//! about the value that has been matched. For example, -//! `enter_rec_or_struct()` adjusts the values given that a record or struct -//! has been matched. This is an infallible pattern, so *all* of the matches -//! must be either wildcards or record/struct patterns. `enter_opt()` -//! handles the fallible cases, and it is correspondingly more complex. -//! -//! ## Bindings -//! -//! We store information about the bound variables for each arm as part of the -//! per-arm `ArmData` struct. There is a mapping from identifiers to -//! `BindingInfo` structs. These structs contain the mode/id/type of the -//! binding, but they also contain an LLVM value which points at an alloca -//! called `llmatch`. For by value bindings that are Copy, we also create -//! an extra alloca that we copy the matched value to so that any changes -//! we do to our copy is not reflected in the original and vice-versa. -//! We don't do this if it's a move since the original value can't be used -//! and thus allowing us to cheat in not creating an extra alloca. -//! -//! The `llmatch` binding always stores a pointer into the value being matched -//! 
which points at the data for the binding. If the value being matched has -//! type `T`, then, `llmatch` will point at an alloca of type `T*` (and hence -//! `llmatch` has type `T**`). So, if you have a pattern like: -//! -//! let a: A = ...; -//! let b: B = ...; -//! match (a, b) { (ref c, d) => { ... } } -//! -//! For `c` and `d`, we would generate allocas of type `C*` and `D*` -//! respectively. These are called the `llmatch`. As we match, when we come -//! up against an identifier, we store the current pointer into the -//! corresponding alloca. -//! -//! Once a pattern is completely matched, and assuming that there is no guard -//! pattern, we will branch to a block that leads to the body itself. For any -//! by-value bindings, this block will first load the ptr from `llmatch` (the -//! one of type `D*`) and then load a second time to get the actual value (the -//! one of type `D`). For by ref bindings, the value of the local variable is -//! simply the first alloca. -//! -//! So, for the example above, we would generate a setup kind of like this: -//! -//! +-------+ -//! | Entry | -//! +-------+ -//! | -//! +--------------------------------------------+ -//! | llmatch_c = (addr of first half of tuple) | -//! | llmatch_d = (addr of second half of tuple) | -//! +--------------------------------------------+ -//! | -//! +--------------------------------------+ -//! | *llbinding_d = **llmatch_d | -//! +--------------------------------------+ -//! -//! If there is a guard, the situation is slightly different, because we must -//! execute the guard code. Moreover, we need to do so once for each of the -//! alternatives that lead to the arm, because if the guard fails, they may -//! have different points from which to continue the search. Therefore, in that -//! case, we generate code that looks more like: -//! -//! +-------+ -//! | Entry | -//! +-------+ -//! | -//! +-------------------------------------------+ -//! | llmatch_c = (addr of first half of tuple) | -//! | llmatch_d = (addr of first half of tuple) | -//! +-------------------------------------------+ -//! | -//! +-------------------------------------------------+ -//! | *llbinding_d = **llmatch_d | -//! | check condition | -//! | if false { goto next case } | -//! | if true { goto body } | -//! +-------------------------------------------------+ -//! -//! The handling for the cleanups is a bit... sensitive. Basically, the body -//! is the one that invokes `add_clean()` for each binding. During the guard -//! evaluation, we add temporary cleanups and revoke them after the guard is -//! evaluated (it could fail, after all). Note that guards and moves are -//! just plain incompatible. -//! -//! Some relevant helper functions that manage bindings: -//! - `create_bindings_map()` -//! - `insert_lllocals()` -//! -//! -//! ## Notes on vector pattern matching. -//! -//! Vector pattern matching is surprisingly tricky. The problem is that -//! the structure of the vector isn't fully known, and slice matches -//! can be done on subparts of it. -//! -//! The way that vector pattern matches are dealt with, then, is as -//! follows. First, we make the actual condition associated with a -//! vector pattern simply a vector length comparison. So the pattern -//! [1, .. x] gets the condition "vec len >= 1", and the pattern -//! [.. x] gets the condition "vec len >= 0". The problem here is that -//! having the condition "vec len >= 1" hold clearly does not mean that -//! only a pattern that has exactly that condition will match. This -//! 
means that it may well be the case that a condition holds, but none -//! of the patterns matching that condition match; to deal with this, -//! when doing vector length matches, we have match failures proceed to -//! the next condition to check. -//! -//! There are a couple more subtleties to deal with. While the "actual" -//! condition associated with vector length tests is simply a test on -//! the vector length, the actual vec_len Opt entry contains more -//! information used to restrict which matches are associated with it. -//! So that all matches in a submatch are matching against the same -//! values from inside the vector, they are split up by how many -//! elements they match at the front and at the back of the vector. In -//! order to make sure that arms are properly checked in order, even -//! with the overmatching conditions, each vec_len Opt entry is -//! associated with a range of matches. -//! Consider the following: -//! -//! match &[1, 2, 3] { -//! [1, 1, .. _] => 0, -//! [1, 2, 2, .. _] => 1, -//! [1, 2, 3, .. _] => 2, -//! [1, 2, .. _] => 3, -//! _ => 4 -//! } -//! The proper arm to match is arm 2, but arms 0 and 3 both have the -//! condition "len >= 2". If arm 3 was lumped in with arm 0, then the -//! wrong branch would be taken. Instead, vec_len Opts are associated -//! with a contiguous range of matches that have the same "shape". -//! This is sort of ugly and requires a bunch of special handling of -//! vec_len options. - -pub use self::BranchKind::*; -pub use self::OptResult::*; -pub use self::TransBindingMode::*; -use self::Opt::*; -use self::FailureHandler::*; - -use llvm::{ValueRef, BasicBlockRef}; -use rustc_const_eval::check_match::{self, Constructor, StaticInliner}; -use rustc_const_eval::{compare_lit_exprs, eval_const_expr, fatal_const_eval_err}; -use rustc::hir::def::{Def, DefMap}; -use rustc::hir::def_id::DefId; -use middle::expr_use_visitor as euv; -use middle::lang_items::StrEqFnLangItem; -use middle::mem_categorization as mc; -use middle::mem_categorization::Categorization; -use rustc::hir::pat_util::*; -use rustc::ty::subst::Substs; -use adt; -use base::*; -use build::{AddCase, And, Br, CondBr, GEPi, InBoundsGEP, Load, PointerCast}; -use build::{Not, Store, Sub, add_comment}; -use build; -use callee::{Callee, ArgVals}; -use cleanup::{self, CleanupMethods, DropHintMethods}; -use common::*; -use consts; -use datum::*; -use debuginfo::{self, DebugLoc, ToDebugLoc}; -use expr::{self, Dest}; -use monomorphize; -use tvec; -use type_of; -use Disr; -use value::Value; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::traits::Reveal; -use session::config::NoDebugInfo; -use util::common::indenter; -use util::nodemap::FnvHashMap; -use util::ppaux; - -use std; -use std::cell::RefCell; -use std::cmp::Ordering; -use std::fmt; -use std::rc::Rc; -use rustc::hir::{self, PatKind}; -use syntax::ast::{self, DUMMY_NODE_ID, NodeId}; -use syntax_pos::Span; -use rustc::hir::fold::Folder; -use syntax::ptr::P; - -#[derive(Copy, Clone, Debug)] -struct ConstantExpr<'a>(&'a hir::Expr); - -impl<'a> ConstantExpr<'a> { - fn eq<'b, 'tcx>(self, other: ConstantExpr<'a>, tcx: TyCtxt<'b, 'tcx, 'tcx>) -> bool { - match compare_lit_exprs(tcx, self.0.span, self.0, other.0) { - Ok(result) => result == Ordering::Equal, - Err(_) => bug!("compare_list_exprs: type mismatch"), - } - } -} - -// An option identifying a branch (either a literal, an enum variant or a range) -#[derive(Debug)] -enum Opt<'a, 'tcx> { - ConstantValue(ConstantExpr<'a>, DebugLoc), - ConstantRange(ConstantExpr<'a>, 
ConstantExpr<'a>, DebugLoc), - Variant(Disr, Rc>, DefId, DebugLoc), - SliceLengthEqual(usize, DebugLoc), - SliceLengthGreaterOrEqual(/* prefix length */ usize, - /* suffix length */ usize, - DebugLoc), -} - -impl<'a, 'b, 'tcx> Opt<'a, 'tcx> { - fn eq(&self, other: &Opt<'a, 'tcx>, tcx: TyCtxt<'b, 'tcx, 'tcx>) -> bool { - match (self, other) { - (&ConstantValue(a, _), &ConstantValue(b, _)) => a.eq(b, tcx), - (&ConstantRange(a1, a2, _), &ConstantRange(b1, b2, _)) => { - a1.eq(b1, tcx) && a2.eq(b2, tcx) - } - (&Variant(a_disr, ref a_repr, a_def, _), - &Variant(b_disr, ref b_repr, b_def, _)) => { - a_disr == b_disr && *a_repr == *b_repr && a_def == b_def - } - (&SliceLengthEqual(a, _), &SliceLengthEqual(b, _)) => a == b, - (&SliceLengthGreaterOrEqual(a1, a2, _), - &SliceLengthGreaterOrEqual(b1, b2, _)) => { - a1 == b1 && a2 == b2 - } - _ => false - } - } - - fn trans<'blk>(&self, mut bcx: Block<'blk, 'tcx>) -> OptResult<'blk, 'tcx> { - use consts::TrueConst::Yes; - let _icx = push_ctxt("match::trans_opt"); - let ccx = bcx.ccx(); - match *self { - ConstantValue(ConstantExpr(lit_expr), _) => { - let lit_ty = bcx.tcx().node_id_to_type(lit_expr.id); - let expr = consts::const_expr(ccx, &lit_expr, bcx.fcx.param_substs, None, Yes); - let llval = match expr { - Ok((llval, _)) => llval, - Err(err) => { - fatal_const_eval_err(bcx.tcx(), err.as_inner(), lit_expr.span, "pattern"); - } - }; - let lit_datum = immediate_rvalue(llval, lit_ty); - let lit_datum = unpack_datum!(bcx, lit_datum.to_appropriate_datum(bcx)); - SingleResult(Result::new(bcx, lit_datum.val)) - } - ConstantRange(ConstantExpr(ref l1), ConstantExpr(ref l2), _) => { - let l1 = match consts::const_expr(ccx, &l1, bcx.fcx.param_substs, None, Yes) { - Ok((l1, _)) => l1, - Err(err) => fatal_const_eval_err(bcx.tcx(), err.as_inner(), l1.span, "pattern"), - }; - let l2 = match consts::const_expr(ccx, &l2, bcx.fcx.param_substs, None, Yes) { - Ok((l2, _)) => l2, - Err(err) => fatal_const_eval_err(bcx.tcx(), err.as_inner(), l2.span, "pattern"), - }; - RangeResult(Result::new(bcx, l1), Result::new(bcx, l2)) - } - Variant(disr_val, ref repr, _, _) => { - SingleResult(Result::new(bcx, adt::trans_case(bcx, &repr, disr_val))) - } - SliceLengthEqual(length, _) => { - SingleResult(Result::new(bcx, C_uint(ccx, length))) - } - SliceLengthGreaterOrEqual(prefix, suffix, _) => { - LowerBound(Result::new(bcx, C_uint(ccx, prefix + suffix))) - } - } - } - - fn debug_loc(&self) -> DebugLoc { - match *self { - ConstantValue(_,debug_loc) | - ConstantRange(_, _, debug_loc) | - Variant(_, _, _, debug_loc) | - SliceLengthEqual(_, debug_loc) | - SliceLengthGreaterOrEqual(_, _, debug_loc) => debug_loc - } - } -} - -#[derive(Copy, Clone, PartialEq)] -pub enum BranchKind { - NoBranch, - Single, - Switch, - Compare, - CompareSliceLength -} - -pub enum OptResult<'blk, 'tcx: 'blk> { - SingleResult(Result<'blk, 'tcx>), - RangeResult(Result<'blk, 'tcx>, Result<'blk, 'tcx>), - LowerBound(Result<'blk, 'tcx>) -} - -#[derive(Clone, Copy, PartialEq)] -pub enum TransBindingMode { - /// By-value binding for a copy type: copies from matched data - /// into a fresh LLVM alloca. - TrByCopy(/* llbinding */ ValueRef), - - /// By-value binding for a non-copy type where we copy into a - /// fresh LLVM alloca; this most accurately reflects the language - /// semantics (e.g. it properly handles overwrites of the matched - /// input), but potentially injects an unwanted copy. 
- TrByMoveIntoCopy(/* llbinding */ ValueRef), - - /// Binding a non-copy type by reference under the hood; this is - /// a codegen optimization to avoid unnecessary memory traffic. - TrByMoveRef, - - /// By-ref binding exposed in the original source input. - TrByRef, -} - -impl TransBindingMode { - /// if binding by making a fresh copy; returns the alloca that it - /// will copy into; otherwise None. - fn alloca_if_copy(&self) -> Option { - match *self { - TrByCopy(llbinding) | TrByMoveIntoCopy(llbinding) => Some(llbinding), - TrByMoveRef | TrByRef => None, - } - } -} - -/// Information about a pattern binding: -/// - `llmatch` is a pointer to a stack slot. The stack slot contains a -/// pointer into the value being matched. Hence, llmatch has type `T**` -/// where `T` is the value being matched. -/// - `trmode` is the trans binding mode -/// - `id` is the node id of the binding -/// - `ty` is the Rust type of the binding -#[derive(Clone, Copy)] -pub struct BindingInfo<'tcx> { - pub llmatch: ValueRef, - pub trmode: TransBindingMode, - pub id: ast::NodeId, - pub span: Span, - pub ty: Ty<'tcx>, -} - -type BindingsMap<'tcx> = FnvHashMap>; - -struct ArmData<'p, 'blk, 'tcx: 'blk> { - bodycx: Block<'blk, 'tcx>, - arm: &'p hir::Arm, - bindings_map: BindingsMap<'tcx> -} - -/// Info about Match. -/// If all `pats` are matched then arm `data` will be executed. -/// As we proceed `bound_ptrs` are filled with pointers to values to be bound, -/// these pointers are stored in llmatch variables just before executing `data` arm. -struct Match<'a, 'p: 'a, 'blk: 'a, 'tcx: 'blk> { - pats: Vec<&'p hir::Pat>, - data: &'a ArmData<'p, 'blk, 'tcx>, - bound_ptrs: Vec<(ast::Name, ValueRef)>, - // Thread along renamings done by the check_match::StaticInliner, so we can - // map back to original NodeIds - pat_renaming_map: Option<&'a FnvHashMap<(NodeId, Span), NodeId>> -} - -impl<'a, 'p, 'blk, 'tcx> fmt::Debug for Match<'a, 'p, 'blk, 'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if ppaux::verbose() { - // for many programs, this just take too long to serialize - write!(f, "{:?}", self.pats) - } else { - write!(f, "{} pats", self.pats.len()) - } - } -} - -fn has_nested_bindings(m: &[Match], col: usize) -> bool { - for br in m { - if let PatKind::Binding(_, _, Some(..)) = br.pats[col].node { - return true - } - } - false -} - -// As noted in `fn match_datum`, we should eventually pass around a -// `Datum` for the `val`; but until we get to that point, this -// `MatchInput` struct will serve -- it has everything `Datum` -// does except for the type field. 
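// A hedged, surface-level illustration (made-up values, not code from this
// file) of the distinctions the TransBindingMode variants above encode:
//
//     fn main() {
//         let pair = (1u8, String::from("hi"));
//         match pair {
//             // `n` has a Copy type, so it is bound by copying (TrByCopy-like);
//             // `s` does not, so binding it moves the String out of `pair`
//             // (the TrByMoveIntoCopy / TrByMoveRef cases).
//             (n, s) => println!("{} {}", n, s),
//         }
//
//         let pair2 = (2u8, String::from("ho"));
//         match pair2 {
//             // `ref s` binds by reference and moves nothing (the TrByRef case).
//             (_, ref s) => println!("{}", s),
//         }
//         println!("{}", pair2.1); // still usable: nothing was moved out
//     }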
-#[derive(Copy, Clone)] -pub struct MatchInput { val: ValueRef, lval: Lvalue } - -impl<'tcx> Datum<'tcx, Lvalue> { - pub fn match_input(&self) -> MatchInput { - MatchInput { - val: self.val, - lval: self.kind, - } - } -} - -impl fmt::Debug for MatchInput { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&Value(self.val), f) - } -} - -impl MatchInput { - fn from_val(val: ValueRef) -> MatchInput { - MatchInput { - val: val, - lval: Lvalue::new("MatchInput::from_val"), - } - } - - fn to_datum<'tcx>(self, ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> { - Datum::new(self.val, ty, self.lval) - } -} - -fn expand_nested_bindings<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - col: usize, - val: MatchInput) - -> Vec> { - debug!("expand_nested_bindings(bcx={}, m={:?}, col={}, val={:?})", - bcx.to_str(), m, col, val); - let _indenter = indenter(); - - m.iter().map(|br| { - let mut bound_ptrs = br.bound_ptrs.clone(); - let mut pat = br.pats[col]; - loop { - pat = match pat.node { - PatKind::Binding(_, ref path, Some(ref inner)) => { - bound_ptrs.push((path.node, val.val)); - &inner - }, - _ => break - } - } - - let mut pats = br.pats.clone(); - pats[col] = pat; - Match { - pats: pats, - data: &br.data, - bound_ptrs: bound_ptrs, - pat_renaming_map: br.pat_renaming_map, - } - }).collect() -} - -fn enter_match<'a, 'b, 'p, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - col: usize, - val: MatchInput, - mut e: F) - -> Vec> where - F: FnMut(&[(&'p hir::Pat, Option>)]) - -> Option>)>>, -{ - debug!("enter_match(bcx={}, m={:?}, col={}, val={:?})", - bcx.to_str(), m, col, val); - let _indenter = indenter(); - - m.iter().filter_map(|br| { - let pats : Vec<_> = br.pats.iter().map(|p| (*p, None)).collect(); - e(&pats).map(|pats| { - let this = br.pats[col]; - let mut bound_ptrs = br.bound_ptrs.clone(); - match this.node { - PatKind::Binding(_, ref path, None) => { - bound_ptrs.push((path.node, val.val)); - } - PatKind::Vec(ref before, Some(ref slice), ref after) => { - if let PatKind::Binding(_, ref path, None) = slice.node { - let subslice_val = bind_subslice_pat( - bcx, this.id, val, - before.len(), after.len()); - bound_ptrs.push((path.node, subslice_val)); - } - } - _ => {} - } - Match { - pats: pats.into_iter().map(|p| p.0).collect(), - data: br.data, - bound_ptrs: bound_ptrs, - pat_renaming_map: br.pat_renaming_map, - } - }) - }).collect() -} - -fn enter_default<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - col: usize, - val: MatchInput) - -> Vec> { - debug!("enter_default(bcx={}, m={:?}, col={}, val={:?})", - bcx.to_str(), m, col, val); - let _indenter = indenter(); - - // Collect all of the matches that can match against anything. - enter_match(bcx, m, col, val, |pats| { - match pats[col].0.node { - PatKind::Binding(..) | PatKind::Wild => { - let mut r = pats[..col].to_vec(); - r.extend_from_slice(&pats[col + 1..]); - Some(r) - } - _ => None - } - }) -} - -// nmatsakis: what does enter_opt do? 
-// in trans/match -// trans/match.rs is like stumbling around in a dark cave -// pcwalton: the enter family of functions adjust the set of -// patterns as needed -// yeah, at some point I kind of achieved some level of -// understanding -// anyhow, they adjust the patterns given that something of that -// kind has been found -// pcwalton: ok, right, so enter_XXX() adjusts the patterns, as I -// said -// enter_match() kind of embodies the generic code -// it is provided with a function that tests each pattern to see -// if it might possibly apply and so forth -// so, if you have a pattern like {a: _, b: _, _} and one like _ -// then _ would be expanded to (_, _) -// one spot for each of the sub-patterns -// enter_opt() is one of the more complex; it covers the fallible -// cases -// enter_rec_or_struct() or enter_tuple() are simpler, since they -// are infallible patterns -// so all patterns must either be records (resp. tuples) or -// wildcards - -/// The above is now outdated in that enter_match() now takes a function that -/// takes the complete row of patterns rather than just the first one. -/// Also, most of the enter_() family functions have been unified with -/// the check_match specialization step. -fn enter_opt<'a, 'p, 'blk, 'tcx>( - bcx: Block<'blk, 'tcx>, - _: ast::NodeId, - m: &[Match<'a, 'p, 'blk, 'tcx>], - opt: &Opt, - col: usize, - variant_size: usize, - val: MatchInput) - -> Vec> { - debug!("enter_opt(bcx={}, m={:?}, opt={:?}, col={}, val={:?})", - bcx.to_str(), m, *opt, col, val); - let _indenter = indenter(); - - let ctor = match opt { - &ConstantValue(ConstantExpr(expr), _) => Constructor::ConstantValue( - eval_const_expr(bcx.tcx(), &expr) - ), - &ConstantRange(ConstantExpr(lo), ConstantExpr(hi), _) => Constructor::ConstantRange( - eval_const_expr(bcx.tcx(), &lo), - eval_const_expr(bcx.tcx(), &hi) - ), - &SliceLengthEqual(n, _) => - Constructor::Slice(n), - &SliceLengthGreaterOrEqual(before, after, _) => - Constructor::SliceWithSubslice(before, after), - &Variant(_, _, def_id, _) => - Constructor::Variant(def_id) - }; - - let param_env = bcx.tcx().empty_parameter_environment(); - let mcx = check_match::MatchCheckCtxt { - tcx: bcx.tcx(), - param_env: param_env, - }; - enter_match(bcx, m, col, val, |pats| - check_match::specialize(&mcx, &pats[..], &ctor, col, variant_size) - ) -} - -// Returns the options in one column of matches. An option is something that -// needs to be conditionally matched at runtime; for example, the discriminant -// on a set of enum variants or a literal. -fn get_branches<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - col: usize) - -> Vec> { - let tcx = bcx.tcx(); - - let mut found: Vec = vec![]; - for br in m { - let cur = br.pats[col]; - let debug_loc = match br.pat_renaming_map { - Some(pat_renaming_map) => { - match pat_renaming_map.get(&(cur.id, cur.span)) { - Some(&id) => DebugLoc::At(id, cur.span), - None => DebugLoc::At(cur.id, cur.span), - } - } - None => DebugLoc::None - }; - - let opt = match cur.node { - PatKind::Lit(ref l) => { - ConstantValue(ConstantExpr(&l), debug_loc) - } - PatKind::Path(..) | PatKind::TupleStruct(..) | PatKind::Struct(..) 
=> { - match tcx.expect_def(cur.id) { - Def::Variant(enum_id, var_id) => { - let variant = tcx.lookup_adt_def(enum_id).variant_with_id(var_id); - Variant(Disr::from(variant.disr_val), - adt::represent_node(bcx, cur.id), - var_id, - debug_loc) - } - _ => continue - } - } - PatKind::Range(ref l1, ref l2) => { - ConstantRange(ConstantExpr(&l1), ConstantExpr(&l2), debug_loc) - } - PatKind::Vec(ref before, None, ref after) => { - SliceLengthEqual(before.len() + after.len(), debug_loc) - } - PatKind::Vec(ref before, Some(_), ref after) => { - SliceLengthGreaterOrEqual(before.len(), after.len(), debug_loc) - } - _ => continue - }; - - if !found.iter().any(|x| x.eq(&opt, tcx)) { - found.push(opt); - } - } - found -} - -struct ExtractedBlock<'blk, 'tcx: 'blk> { - vals: Vec, - bcx: Block<'blk, 'tcx>, -} - -fn extract_variant_args<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - repr: &adt::Repr<'tcx>, - disr_val: Disr, - val: MatchInput) - -> ExtractedBlock<'blk, 'tcx> { - let _icx = push_ctxt("match::extract_variant_args"); - // Assume enums are always sized for now. - let val = adt::MaybeSizedValue::sized(val.val); - let args = (0..adt::num_args(repr, disr_val)).map(|i| { - adt::trans_field_ptr(bcx, repr, val, disr_val, i) - }).collect(); - - ExtractedBlock { vals: args, bcx: bcx } -} - -/// Helper for converting from the ValueRef that we pass around in the match code, which is always -/// an lvalue, into a Datum. Eventually we should just pass around a Datum and be done with it. -fn match_datum<'tcx>(val: MatchInput, left_ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> { - val.to_datum(left_ty) -} - -fn bind_subslice_pat(bcx: Block, - pat_id: ast::NodeId, - val: MatchInput, - offset_left: usize, - offset_right: usize) -> ValueRef { - let _icx = push_ctxt("match::bind_subslice_pat"); - let vec_ty = node_id_type(bcx, pat_id); - let vec_ty_contents = match vec_ty.sty { - ty::TyBox(ty) => ty, - ty::TyRef(_, mt) | ty::TyRawPtr(mt) => mt.ty, - _ => vec_ty - }; - let unit_ty = vec_ty_contents.sequence_element_type(bcx.tcx()); - let vec_datum = match_datum(val, vec_ty); - let (base, len) = vec_datum.get_vec_base_and_len(bcx); - - let slice_begin = InBoundsGEP(bcx, base, &[C_uint(bcx.ccx(), offset_left)]); - let diff = offset_left + offset_right; - if let ty::TyArray(ty, n) = vec_ty_contents.sty { - let array_ty = bcx.tcx().mk_array(ty, n-diff); - let llty_array = type_of::type_of(bcx.ccx(), array_ty); - return PointerCast(bcx, slice_begin, llty_array.ptr_to()); - } - - let slice_len_offset = C_uint(bcx.ccx(), diff); - let slice_len = Sub(bcx, len, slice_len_offset, DebugLoc::None); - let slice_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReErased), - bcx.tcx().mk_slice(unit_ty)); - let scratch = rvalue_scratch_datum(bcx, slice_ty, ""); - Store(bcx, slice_begin, expr::get_dataptr(bcx, scratch.val)); - Store(bcx, slice_len, expr::get_meta(bcx, scratch.val)); - scratch.val -} - -fn extract_vec_elems<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - left_ty: Ty<'tcx>, - before: usize, - after: usize, - val: MatchInput) - -> ExtractedBlock<'blk, 'tcx> { - let _icx = push_ctxt("match::extract_vec_elems"); - let vec_datum = match_datum(val, left_ty); - let (base, len) = vec_datum.get_vec_base_and_len(bcx); - let mut elems = vec![]; - elems.extend((0..before).map(|i| GEPi(bcx, base, &[i]))); - elems.extend((0..after).rev().map(|i| { - InBoundsGEP(bcx, base, &[ - Sub(bcx, len, C_uint(bcx.ccx(), i + 1), DebugLoc::None) - ]) - })); - ExtractedBlock { vals: elems, bcx: bcx } -} - -// Macro for deciding whether any of the remaining matches fit a 
given kind of -// pattern. Note that, because the macro is well-typed, either ALL of the -// matches should fit that sort of pattern or NONE (however, some of the -// matches may be wildcards like _ or identifiers). -macro_rules! any_pat { - ($m:expr, $col:expr, $pattern:pat) => ( - ($m).iter().any(|br| { - match br.pats[$col].node { - $pattern => true, - _ => false - } - }) - ) -} - -fn any_uniq_pat(m: &[Match], col: usize) -> bool { - any_pat!(m, col, PatKind::Box(_)) -} - -fn any_region_pat(m: &[Match], col: usize) -> bool { - any_pat!(m, col, PatKind::Ref(..)) -} - -fn any_irrefutable_adt_pat(tcx: TyCtxt, m: &[Match], col: usize) -> bool { - m.iter().any(|br| { - let pat = br.pats[col]; - match pat.node { - PatKind::Tuple(..) => true, - PatKind::Struct(..) | PatKind::TupleStruct(..) | PatKind::Path(..) => { - match tcx.expect_def(pat.id) { - Def::Struct(..) | Def::TyAlias(..) | Def::AssociatedTy(..) => true, - _ => false, - } - } - _ => false - } - }) -} - -/// What to do when the pattern match fails. -enum FailureHandler { - Infallible, - JumpToBasicBlock(BasicBlockRef), - Unreachable -} - -impl FailureHandler { - fn is_fallible(&self) -> bool { - match *self { - Infallible => false, - _ => true - } - } - - fn is_infallible(&self) -> bool { - !self.is_fallible() - } - - fn handle_fail(&self, bcx: Block) { - match *self { - Infallible => - bug!("attempted to panic in a non-panicking panic handler!"), - JumpToBasicBlock(basic_block) => - Br(bcx, basic_block, DebugLoc::None), - Unreachable => - build::Unreachable(bcx) - } - } -} - -fn pick_column_to_specialize(def_map: &RefCell, m: &[Match]) -> Option { - fn pat_score(def_map: &RefCell, pat: &hir::Pat) -> usize { - match pat.node { - PatKind::Binding(_, _, Some(ref inner)) => pat_score(def_map, &inner), - _ if pat_is_refutable(&def_map.borrow(), pat) => 1, - _ => 0 - } - } - - let column_score = |m: &[Match], col: usize| -> usize { - let total_score = m.iter() - .map(|row| row.pats[col]) - .map(|pat| pat_score(def_map, pat)) - .sum(); - - // Irrefutable columns always go first, they'd only be duplicated in the branches. - if total_score == 0 { - std::usize::MAX - } else { - total_score - } - }; - - let column_contains_any_nonwild_patterns = |&col: &usize| -> bool { - m.iter().any(|row| match row.pats[col].node { - PatKind::Wild => false, - _ => true - }) - }; - - (0..m[0].pats.len()) - .filter(column_contains_any_nonwild_patterns) - .map(|col| (col, column_score(m, col))) - .max_by_key(|&(_, score)| score) - .map(|(col, _)| col) -} - -// Compiles a comparison between two things. 
-fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - lhs: ValueRef, - rhs: ValueRef, - rhs_t: Ty<'tcx>, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { - fn compare_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lhs_data: ValueRef, - lhs_len: ValueRef, - rhs_data: ValueRef, - rhs_len: ValueRef, - rhs_t: Ty<'tcx>, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { - let did = langcall(bcx.tcx(), - None, - &format!("comparison of `{}`", rhs_t), - StrEqFnLangItem); - let args = [lhs_data, lhs_len, rhs_data, rhs_len]; - Callee::def(bcx.ccx(), did, bcx.tcx().mk_substs(Substs::empty())) - .call(bcx, debug_loc, ArgVals(&args), None) - } - - let _icx = push_ctxt("compare_values"); - if rhs_t.is_scalar() { - let cmp = compare_scalar_types(cx, lhs, rhs, rhs_t, hir::BiEq, debug_loc); - return Result::new(cx, cmp); - } - - match rhs_t.sty { - ty::TyRef(_, mt) => match mt.ty.sty { - ty::TyStr => { - let lhs_data = Load(cx, expr::get_dataptr(cx, lhs)); - let lhs_len = Load(cx, expr::get_meta(cx, lhs)); - let rhs_data = Load(cx, expr::get_dataptr(cx, rhs)); - let rhs_len = Load(cx, expr::get_meta(cx, rhs)); - compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc) - } - ty::TyArray(ty, _) | ty::TySlice(ty) => match ty.sty { - ty::TyUint(ast::UintTy::U8) => { - // NOTE: cast &[u8] and &[u8; N] to &str and abuse the str_eq lang item, - // which calls memcmp(). - let pat_len = val_ty(rhs).element_type().array_length(); - let ty_str_slice = cx.tcx().mk_static_str(); - - let rhs_data = GEPi(cx, rhs, &[0, 0]); - let rhs_len = C_uint(cx.ccx(), pat_len); - - let lhs_data; - let lhs_len; - if val_ty(lhs) == val_ty(rhs) { - // Both the discriminant and the pattern are thin pointers - lhs_data = GEPi(cx, lhs, &[0, 0]); - lhs_len = C_uint(cx.ccx(), pat_len); - } else { - // The discriminant is a fat pointer - let llty_str_slice = type_of::type_of(cx.ccx(), ty_str_slice).ptr_to(); - let lhs_str = PointerCast(cx, lhs, llty_str_slice); - lhs_data = Load(cx, expr::get_dataptr(cx, lhs_str)); - lhs_len = Load(cx, expr::get_meta(cx, lhs_str)); - } - - compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc) - }, - _ => bug!("only byte strings supported in compare_values"), - }, - _ => bug!("only string and byte strings supported in compare_values"), - }, - _ => bug!("only scalars, byte strings, and strings supported in compare_values"), - } -} - -/// For each binding in `data.bindings_map`, adds an appropriate entry into the `fcx.lllocals` map -fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - bindings_map: &BindingsMap<'tcx>, - cs: Option) - -> Block<'blk, 'tcx> { - for (&name, &binding_info) in bindings_map { - let (llval, aliases_other_state) = match binding_info.trmode { - // By value mut binding for a copy type: load from the ptr - // into the matched value and copy to our alloca - TrByCopy(llbinding) | - TrByMoveIntoCopy(llbinding) => { - let llval = Load(bcx, binding_info.llmatch); - let lvalue = match binding_info.trmode { - TrByCopy(..) => - Lvalue::new("_match::insert_lllocals"), - TrByMoveIntoCopy(..) => { - // match_input moves from the input into a - // separate stack slot. - // - // E.g. consider moving the value `D(A)` out - // of the tuple `(D(A), D(B))` and into the - // local variable `x` via the pattern `(x,_)`, - // leaving the remainder of the tuple `(_, - // D(B))` still to be dropped in the future. 
- // - // Thus, here we must zero the place that we - // are moving *from*, because we do not yet - // track drop flags for a fragmented parent - // match input expression. - // - // Longer term we will be able to map the move - // into `(x, _)` up to the parent path that - // owns the whole tuple, and mark the - // corresponding stack-local drop-flag - // tracking the first component of the tuple. - let hint_kind = HintKind::ZeroAndMaintain; - Lvalue::new_with_hint("_match::insert_lllocals (match_input)", - bcx, binding_info.id, hint_kind) - } - _ => bug!(), - }; - let datum = Datum::new(llval, binding_info.ty, lvalue); - call_lifetime_start(bcx, llbinding); - bcx = datum.store_to(bcx, llbinding); - if let Some(cs) = cs { - bcx.fcx.schedule_lifetime_end(cs, llbinding); - } - - (llbinding, false) - }, - - // By value move bindings: load from the ptr into the matched value - TrByMoveRef => (Load(bcx, binding_info.llmatch), true), - - // By ref binding: use the ptr into the matched value - TrByRef => (binding_info.llmatch, true), - }; - - - // A local that aliases some other state must be zeroed, since - // the other state (e.g. some parent data that we matched - // into) will still have its subcomponents (such as this - // local) destructed at the end of the parent's scope. Longer - // term, we will properly map such parents to the set of - // unique drop flags for its fragments. - let hint_kind = if aliases_other_state { - HintKind::ZeroAndMaintain - } else { - HintKind::DontZeroJustUse - }; - let lvalue = Lvalue::new_with_hint("_match::insert_lllocals (local)", - bcx, - binding_info.id, - hint_kind); - let datum = Datum::new(llval, binding_info.ty, lvalue); - if let Some(cs) = cs { - let opt_datum = lvalue.dropflag_hint(bcx); - bcx.fcx.schedule_lifetime_end(cs, binding_info.llmatch); - bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty, opt_datum); - } - - debug!("binding {} to {:?}", binding_info.id, Value(llval)); - bcx.fcx.lllocals.borrow_mut().insert(binding_info.id, datum); - debuginfo::create_match_binding_metadata(bcx, name, binding_info); - } - bcx -} - -fn compile_guard<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - guard_expr: &hir::Expr, - data: &ArmData<'p, 'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - vals: &[MatchInput], - chk: &FailureHandler, - has_genuine_default: bool) - -> Block<'blk, 'tcx> { - debug!("compile_guard(bcx={}, guard_expr={:?}, m={:?}, vals={:?})", - bcx.to_str(), guard_expr, m, vals); - let _indenter = indenter(); - - let mut bcx = insert_lllocals(bcx, &data.bindings_map, None); - - let val = unpack_datum!(bcx, expr::trans(bcx, guard_expr)); - let val = val.to_llbool(bcx); - - for (_, &binding_info) in &data.bindings_map { - if let Some(llbinding) = binding_info.trmode.alloca_if_copy() { - call_lifetime_end(bcx, llbinding) - } - } - - for (_, &binding_info) in &data.bindings_map { - bcx.fcx.lllocals.borrow_mut().remove(&binding_info.id); - } - - with_cond(bcx, Not(bcx, val, guard_expr.debug_loc()), |bcx| { - for (_, &binding_info) in &data.bindings_map { - call_lifetime_end(bcx, binding_info.llmatch); - } - match chk { - // If the default arm is the only one left, move on to the next - // condition explicitly rather than (possibly) falling back to - // the default arm. 
- &JumpToBasicBlock(_) if m.len() == 1 && has_genuine_default => { - chk.handle_fail(bcx); - } - _ => { - compile_submatch(bcx, m, vals, chk, has_genuine_default); - } - }; - bcx - }) -} - -fn compile_submatch<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - vals: &[MatchInput], - chk: &FailureHandler, - has_genuine_default: bool) { - debug!("compile_submatch(bcx={}, m={:?}, vals=[{:?}])", - bcx.to_str(), m, vals); - let _indenter = indenter(); - let _icx = push_ctxt("match::compile_submatch"); - let mut bcx = bcx; - if m.is_empty() { - if chk.is_fallible() { - chk.handle_fail(bcx); - } - return; - } - - let tcx = bcx.tcx(); - let def_map = &tcx.def_map; - match pick_column_to_specialize(def_map, m) { - Some(col) => { - let val = vals[col]; - if has_nested_bindings(m, col) { - let expanded = expand_nested_bindings(bcx, m, col, val); - compile_submatch_continue(bcx, - &expanded[..], - vals, - chk, - col, - val, - has_genuine_default) - } else { - compile_submatch_continue(bcx, m, vals, chk, col, val, has_genuine_default) - } - } - None => { - let data = &m[0].data; - for &(ref name, ref value_ptr) in &m[0].bound_ptrs { - let binfo = *data.bindings_map.get(name).unwrap(); - call_lifetime_start(bcx, binfo.llmatch); - if binfo.trmode == TrByRef && type_is_fat_ptr(bcx.tcx(), binfo.ty) { - expr::copy_fat_ptr(bcx, *value_ptr, binfo.llmatch); - } - else { - Store(bcx, *value_ptr, binfo.llmatch); - } - } - match data.arm.guard { - Some(ref guard_expr) => { - bcx = compile_guard(bcx, - &guard_expr, - m[0].data, - &m[1..m.len()], - vals, - chk, - has_genuine_default); - } - _ => () - } - Br(bcx, data.bodycx.llbb, DebugLoc::None); - } - } -} - -fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - vals: &[MatchInput], - chk: &FailureHandler, - col: usize, - val: MatchInput, - has_genuine_default: bool) { - let fcx = bcx.fcx; - let tcx = bcx.tcx(); - - let mut vals_left = vals[0..col].to_vec(); - vals_left.extend_from_slice(&vals[col + 1..]); - let ccx = bcx.fcx.ccx; - - // Find a real id (we're adding placeholder wildcard patterns, but - // each column is guaranteed to have at least one real pattern) - let pat_id = m.iter().map(|br| br.pats[col].id) - .find(|&id| id != DUMMY_NODE_ID) - .unwrap_or(DUMMY_NODE_ID); - - let left_ty = if pat_id == DUMMY_NODE_ID { - tcx.mk_nil() - } else { - node_id_type(bcx, pat_id) - }; - - let mcx = check_match::MatchCheckCtxt { - tcx: bcx.tcx(), - param_env: bcx.tcx().empty_parameter_environment(), - }; - let adt_vals = if any_irrefutable_adt_pat(bcx.tcx(), m, col) { - let repr = adt::represent_type(bcx.ccx(), left_ty); - let arg_count = adt::num_args(&repr, Disr(0)); - let (arg_count, struct_val) = if type_is_sized(bcx.tcx(), left_ty) { - (arg_count, val.val) - } else { - // For an unsized ADT (i.e. DST struct), we need to treat - // the last field specially: instead of simply passing a - // ValueRef pointing to that field, as with all the others, - // we skip it and instead construct a 'fat ptr' below. 
- (arg_count - 1, Load(bcx, expr::get_dataptr(bcx, val.val))) - }; - let mut field_vals: Vec = (0..arg_count).map(|ix| - // By definition, these are all sized - adt::trans_field_ptr(bcx, &repr, adt::MaybeSizedValue::sized(struct_val), Disr(0), ix) - ).collect(); - - match left_ty.sty { - ty::TyStruct(def, substs) if !type_is_sized(bcx.tcx(), left_ty) => { - // The last field is technically unsized but - // since we can only ever match that field behind - // a reference we construct a fat ptr here. - let unsized_ty = def.struct_variant().fields.last().map(|field| { - monomorphize::field_ty(bcx.tcx(), substs, field) - }).unwrap(); - let scratch = alloc_ty(bcx, unsized_ty, "__struct_field_fat_ptr"); - - let meta = Load(bcx, expr::get_meta(bcx, val.val)); - let struct_val = adt::MaybeSizedValue::unsized_(struct_val, meta); - - let data = adt::trans_field_ptr(bcx, &repr, struct_val, Disr(0), arg_count); - Store(bcx, data, expr::get_dataptr(bcx, scratch)); - Store(bcx, meta, expr::get_meta(bcx, scratch)); - field_vals.push(scratch); - } - _ => {} - } - Some(field_vals) - } else if any_uniq_pat(m, col) || any_region_pat(m, col) { - let ptr = if type_is_fat_ptr(bcx.tcx(), left_ty) { - val.val - } else { - Load(bcx, val.val) - }; - Some(vec!(ptr)) - } else { - match left_ty.sty { - ty::TyArray(_, n) => { - let args = extract_vec_elems(bcx, left_ty, n, 0, val); - Some(args.vals) - } - _ => None - } - }; - match adt_vals { - Some(field_vals) => { - let pats = enter_match(bcx, m, col, val, |pats| - check_match::specialize(&mcx, pats, - &Constructor::Single, col, - field_vals.len()) - ); - let mut vals: Vec<_> = field_vals.into_iter() - .map(|v|MatchInput::from_val(v)) - .collect(); - vals.extend_from_slice(&vals_left); - compile_submatch(bcx, &pats, &vals, chk, has_genuine_default); - return; - } - _ => () - } - - // Decide what kind of branch we need - let opts = get_branches(bcx, m, col); - debug!("options={:?}", opts); - let mut kind = NoBranch; - let mut test_val = val.val; - debug!("test_val={:?}", Value(test_val)); - if !opts.is_empty() { - match opts[0] { - ConstantValue(..) | ConstantRange(..) => { - test_val = load_if_immediate(bcx, val.val, left_ty); - kind = if left_ty.is_integral() { - Switch - } else { - Compare - }; - } - Variant(_, ref repr, _, _) => { - let (the_kind, val_opt) = adt::trans_switch(bcx, &repr, - val.val, true); - kind = the_kind; - if let Some(tval) = val_opt { test_val = tval; } - } - SliceLengthEqual(..) | SliceLengthGreaterOrEqual(..) => { - let (_, len) = tvec::get_base_and_len(bcx, val.val, left_ty); - test_val = len; - kind = Switch; - } - } - } - for o in &opts { - match *o { - ConstantRange(..) => { kind = Compare; break }, - SliceLengthGreaterOrEqual(..) => { kind = CompareSliceLength; break }, - _ => () - } - } - let else_cx = match kind { - NoBranch | Single => bcx, - _ => bcx.fcx.new_temp_block("match_else") - }; - let sw = if kind == Switch { - build::Switch(bcx, test_val, else_cx.llbb, opts.len()) - } else { - C_int(ccx, 0) // Placeholder for when not using a switch - }; - - let defaults = enter_default(else_cx, m, col, val); - let exhaustive = chk.is_infallible() && defaults.is_empty(); - let len = opts.len(); - - if exhaustive && kind == Switch { - build::Unreachable(else_cx); - } - - // Compile subtrees for each option - for (i, opt) in opts.iter().enumerate() { - // In some cases of range and vector pattern matching, we need to - // override the failure case so that instead of failing, it proceeds - // to try more matching. 
branch_chk, then, is the proper failure case - // for the current conditional branch. - let mut branch_chk = None; - let mut opt_cx = else_cx; - let debug_loc = opt.debug_loc(); - - if kind == Switch || !exhaustive || i + 1 < len { - opt_cx = bcx.fcx.new_temp_block("match_case"); - match kind { - Single => Br(bcx, opt_cx.llbb, debug_loc), - Switch => { - match opt.trans(bcx) { - SingleResult(r) => { - AddCase(sw, r.val, opt_cx.llbb); - bcx = r.bcx; - } - _ => { - bug!( - "in compile_submatch, expected \ - opt.trans() to return a SingleResult") - } - } - } - Compare | CompareSliceLength => { - let t = if kind == Compare { - left_ty - } else { - tcx.types.usize // vector length - }; - let Result { bcx: after_cx, val: matches } = { - match opt.trans(bcx) { - SingleResult(Result { bcx, val }) => { - compare_values(bcx, test_val, val, t, debug_loc) - } - RangeResult(Result { val: vbegin, .. }, - Result { bcx, val: vend }) => { - let llge = compare_scalar_types(bcx, test_val, vbegin, - t, hir::BiGe, debug_loc); - let llle = compare_scalar_types(bcx, test_val, vend, - t, hir::BiLe, debug_loc); - Result::new(bcx, And(bcx, llge, llle, DebugLoc::None)) - } - LowerBound(Result { bcx, val }) => { - Result::new(bcx, compare_scalar_types(bcx, test_val, - val, t, hir::BiGe, - debug_loc)) - } - } - }; - bcx = fcx.new_temp_block("compare_next"); - - // If none of the sub-cases match, and the current condition - // is guarded or has multiple patterns, move on to the next - // condition, if there is any, rather than falling back to - // the default. - let guarded = m[i].data.arm.guard.is_some(); - let multi_pats = m[i].pats.len() > 1; - if i + 1 < len && (guarded || multi_pats || kind == CompareSliceLength) { - branch_chk = Some(JumpToBasicBlock(bcx.llbb)); - } - CondBr(after_cx, matches, opt_cx.llbb, bcx.llbb, debug_loc); - } - _ => () - } - } else if kind == Compare || kind == CompareSliceLength { - Br(bcx, else_cx.llbb, debug_loc); - } - - let mut size = 0; - let mut unpacked = Vec::new(); - match *opt { - Variant(disr_val, ref repr, _, _) => { - let ExtractedBlock {vals: argvals, bcx: new_bcx} = - extract_variant_args(opt_cx, &repr, disr_val, val); - size = argvals.len(); - unpacked = argvals; - opt_cx = new_bcx; - } - SliceLengthEqual(len, _) => { - let args = extract_vec_elems(opt_cx, left_ty, len, 0, val); - size = args.vals.len(); - unpacked = args.vals.clone(); - opt_cx = args.bcx; - } - SliceLengthGreaterOrEqual(before, after, _) => { - let args = extract_vec_elems(opt_cx, left_ty, before, after, val); - size = args.vals.len(); - unpacked = args.vals.clone(); - opt_cx = args.bcx; - } - ConstantValue(..) | ConstantRange(..) => () - } - let opt_ms = enter_opt(opt_cx, pat_id, m, opt, col, size, val); - let mut opt_vals: Vec<_> = unpacked.into_iter() - .map(|v|MatchInput::from_val(v)) - .collect(); - opt_vals.extend_from_slice(&vals_left[..]); - compile_submatch(opt_cx, - &opt_ms[..], - &opt_vals[..], - branch_chk.as_ref().unwrap_or(chk), - has_genuine_default); - } - - // Compile the fall-through case, if any - if !exhaustive && kind != Single { - if kind == Compare || kind == CompareSliceLength { - Br(bcx, else_cx.llbb, DebugLoc::None); - } - match chk { - // If there is only one default arm left, move on to the next - // condition explicitly rather than (eventually) falling back to - // the last default arm. 
- &JumpToBasicBlock(_) if defaults.len() == 1 && has_genuine_default => { - chk.handle_fail(else_cx); - } - _ => { - compile_submatch(else_cx, - &defaults[..], - &vals_left[..], - chk, - has_genuine_default); - } - } - } -} - -pub fn trans_match<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - match_expr: &hir::Expr, - discr_expr: &hir::Expr, - arms: &[hir::Arm], - dest: Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("match::trans_match"); - trans_match_inner(bcx, match_expr.id, discr_expr, arms, dest) -} - -/// Checks whether the binding in `discr` is assigned to anywhere in the expression `body` -fn is_discr_reassigned(bcx: Block, discr: &hir::Expr, body: &hir::Expr) -> bool { - let (vid, field) = match discr.node { - hir::ExprPath(..) => match bcx.tcx().expect_def(discr.id) { - Def::Local(_, vid) | Def::Upvar(_, vid, _, _) => (vid, None), - _ => return false - }, - hir::ExprField(ref base, field) => { - let vid = match bcx.tcx().expect_def_or_none(base.id) { - Some(Def::Local(_, vid)) | Some(Def::Upvar(_, vid, _, _)) => vid, - _ => return false - }; - (vid, Some(mc::NamedField(field.node))) - }, - hir::ExprTupField(ref base, field) => { - let vid = match bcx.tcx().expect_def_or_none(base.id) { - Some(Def::Local(_, vid)) | Some(Def::Upvar(_, vid, _, _)) => vid, - _ => return false - }; - (vid, Some(mc::PositionalField(field.node))) - }, - _ => return false - }; - - let mut rc = ReassignmentChecker { - node: vid, - field: field, - reassigned: false - }; - bcx.tcx().normalizing_infer_ctxt(Reveal::All).enter(|infcx| { - let mut visitor = euv::ExprUseVisitor::new(&mut rc, &infcx); - visitor.walk_expr(body); - }); - rc.reassigned -} - -struct ReassignmentChecker { - node: ast::NodeId, - field: Option, - reassigned: bool -} - -// Determine if the expression we're matching on is reassigned to within -// the body of the match's arm. -// We only care for the `mutate` callback since this check only matters -// for cases where the matched value is moved. -impl<'tcx> euv::Delegate<'tcx> for ReassignmentChecker { - fn consume(&mut self, _: ast::NodeId, _: Span, _: mc::cmt, _: euv::ConsumeMode) {} - fn matched_pat(&mut self, _: &hir::Pat, _: mc::cmt, _: euv::MatchMode) {} - fn consume_pat(&mut self, _: &hir::Pat, _: mc::cmt, _: euv::ConsumeMode) {} - fn borrow(&mut self, _: ast::NodeId, _: Span, _: mc::cmt, _: ty::Region, - _: ty::BorrowKind, _: euv::LoanCause) {} - fn decl_without_init(&mut self, _: ast::NodeId, _: Span) {} - - fn mutate(&mut self, _: ast::NodeId, _: Span, cmt: mc::cmt, _: euv::MutateMode) { - let cmt_id = |cmt: &mc::cmt| match cmt.cat { - Categorization::Upvar(mc::Upvar { id: ty::UpvarId { var_id: vid, ..}, ..}) | - Categorization::Local(vid) => Some(vid), - Categorization::Interior(ref base_cmt, mc::InteriorField(_)) => Some(base_cmt.id), - _ => None - }; - match cmt.cat { - Categorization::Upvar(mc::Upvar { id: ty::UpvarId { var_id: vid, .. }, .. 
}) | - Categorization::Local(vid) => self.reassigned |= self.node == vid, - ref cat => { - let mut cat = cat; - while let &Categorization::Interior(ref base_cmt, mc::InteriorField(field)) = cat { - if let Some(vid) = cmt_id(base_cmt) { - if self.node == vid && (self.field.is_none() || self.field == Some(field)) { - self.reassigned = true; - return; - } - } - cat = &base_cmt.cat; - } - } - } - } -} - -fn create_bindings_map<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pat: &hir::Pat, - discr: &hir::Expr, body: &hir::Expr) - -> BindingsMap<'tcx> { - // Create the bindings map, which is a mapping from each binding name - // to an alloca() that will be the value for that local variable. - // Note that we use the names because each binding will have many ids - // from the various alternatives. - let ccx = bcx.ccx(); - let reassigned = is_discr_reassigned(bcx, discr, body); - let mut bindings_map = FnvHashMap(); - pat_bindings(&pat, |bm, p_id, span, path1| { - let name = path1.node; - let variable_ty = node_id_type(bcx, p_id); - let llvariable_ty = type_of::type_of(ccx, variable_ty); - let tcx = bcx.tcx(); - let param_env = tcx.empty_parameter_environment(); - - let llmatch; - let trmode; - let moves_by_default = variable_ty.moves_by_default(tcx, ¶m_env, span); - match bm { - hir::BindByValue(_) if !moves_by_default || reassigned => - { - llmatch = alloca(bcx, llvariable_ty.ptr_to(), "__llmatch"); - let llcopy = alloca(bcx, llvariable_ty, &bcx.name(name)); - trmode = if moves_by_default { - TrByMoveIntoCopy(llcopy) - } else { - TrByCopy(llcopy) - }; - } - hir::BindByValue(_) => { - // in this case, the final type of the variable will be T, - // but during matching we need to store a *T as explained - // above - llmatch = alloca(bcx, llvariable_ty.ptr_to(), &bcx.name(name)); - trmode = TrByMoveRef; - } - hir::BindByRef(_) => { - llmatch = alloca(bcx, llvariable_ty, &bcx.name(name)); - trmode = TrByRef; - } - }; - bindings_map.insert(name, BindingInfo { - llmatch: llmatch, - trmode: trmode, - id: p_id, - span: span, - ty: variable_ty - }); - }); - return bindings_map; -} - -fn trans_match_inner<'blk, 'tcx>(scope_cx: Block<'blk, 'tcx>, - match_id: ast::NodeId, - discr_expr: &hir::Expr, - arms: &[hir::Arm], - dest: Dest) -> Block<'blk, 'tcx> { - let _icx = push_ctxt("match::trans_match_inner"); - let fcx = scope_cx.fcx; - let mut bcx = scope_cx; - let tcx = bcx.tcx(); - - let discr_datum = unpack_datum!(bcx, expr::trans_to_lvalue(bcx, discr_expr, - "match")); - if bcx.unreachable.get() { - return bcx; - } - - let t = node_id_type(bcx, discr_expr.id); - let chk = if t.is_uninhabited(tcx) { - Unreachable - } else { - Infallible - }; - - let arm_datas: Vec = arms.iter().map(|arm| ArmData { - bodycx: fcx.new_id_block("case_body", arm.body.id), - arm: arm, - bindings_map: create_bindings_map(bcx, &arm.pats[0], discr_expr, &arm.body) - }).collect(); - - let mut pat_renaming_map = if scope_cx.sess().opts.debuginfo != NoDebugInfo { - Some(FnvHashMap()) - } else { - None - }; - - let arm_pats: Vec>> = { - let mut static_inliner = StaticInliner::new(scope_cx.tcx(), - pat_renaming_map.as_mut()); - arm_datas.iter().map(|arm_data| { - arm_data.arm.pats.iter().map(|p| static_inliner.fold_pat((*p).clone())).collect() - }).collect() - }; - - let mut matches = Vec::new(); - for (arm_data, pats) in arm_datas.iter().zip(&arm_pats) { - matches.extend(pats.iter().map(|p| Match { - pats: vec![&p], - data: arm_data, - bound_ptrs: Vec::new(), - pat_renaming_map: pat_renaming_map.as_ref() - })); - } - - // `compile_submatch` works 
one column of arm patterns a time and - // then peels that column off. So as we progress, it may become - // impossible to tell whether we have a genuine default arm, i.e. - // `_ => foo` or not. Sometimes it is important to know that in order - // to decide whether moving on to the next condition or falling back - // to the default arm. - let has_default = arms.last().map_or(false, |arm| { - arm.pats.len() == 1 - && arm.pats.last().unwrap().node == PatKind::Wild - }); - - compile_submatch(bcx, &matches[..], &[discr_datum.match_input()], &chk, has_default); - - let mut arm_cxs = Vec::new(); - for arm_data in &arm_datas { - let mut bcx = arm_data.bodycx; - - // insert bindings into the lllocals map and add cleanups - let cs = fcx.push_custom_cleanup_scope(); - bcx = insert_lllocals(bcx, &arm_data.bindings_map, Some(cleanup::CustomScope(cs))); - bcx = expr::trans_into(bcx, &arm_data.arm.body, dest); - bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, cs); - arm_cxs.push(bcx); - } - - bcx = scope_cx.fcx.join_blocks(match_id, &arm_cxs[..]); - return bcx; -} - -/// Generates code for a local variable declaration like `let ;` or `let = -/// `. -pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - local: &hir::Local) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("match::store_local"); - let mut bcx = bcx; - let tcx = bcx.tcx(); - let pat = &local.pat; - - fn create_dummy_locals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - pat: &hir::Pat) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("create_dummy_locals"); - // create dummy memory for the variables if we have no - // value to store into them immediately - let tcx = bcx.tcx(); - pat_bindings(pat, |_, p_id, _, path1| { - let scope = cleanup::var_scope(tcx, p_id); - bcx = mk_binding_alloca( - bcx, p_id, path1.node, scope, (), - "_match::store_local::create_dummy_locals", - |(), bcx, Datum { val: llval, ty, kind }| { - // Dummy-locals start out uninitialized, so set their - // drop-flag hints (if any) to "moved." - if let Some(hint) = kind.dropflag_hint(bcx) { - let moved_hint = adt::DTOR_MOVED_HINT; - debug!("store moved_hint={} for hint={:?}, uninitialized dummy", - moved_hint, hint); - Store(bcx, C_u8(bcx.fcx.ccx, moved_hint), hint.to_value().value()); - } - - if kind.drop_flag_info.must_zero() { - // if no drop-flag hint, or the hint requires - // we maintain the embedded drop-flag, then - // mark embedded drop-flag(s) as moved - // (i.e. "already dropped"). - drop_done_fill_mem(bcx, llval, ty); - } - bcx - }); - }); - bcx - } - - match local.init { - Some(ref init_expr) => { - // Optimize the "let x = expr" case. This just writes - // the result of evaluating `expr` directly into the alloca - // for `x`. Often the general path results in similar or the - // same code post-optimization, but not always. In particular, - // in unsafe code, you can have expressions like - // - // let x = intrinsics::uninit(); - // - // In such cases, the more general path is unsafe, because - // it assumes it is matching against a valid value. - if let Some(name) = simple_name(pat) { - let var_scope = cleanup::var_scope(tcx, local.id); - return mk_binding_alloca( - bcx, pat.id, name, var_scope, (), - "_match::store_local", - |(), bcx, Datum { val: v, .. }| expr::trans_into(bcx, &init_expr, - expr::SaveIn(v))); - } - - // General path. 
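As a small aside (a sketch, not part of the patch): the fast path above only fires for a plain `let x = expr;` bound to a simple name, where the initializer can be written straight into the binding's stack slot; destructuring `let`s fall through to the general irrefutable-pattern path. The two shapes at the source level:

```rust
fn compute() -> i32 {
    41
}

fn main() {
    // Simple-name case: the initializer result is stored directly into x's slot.
    let x = compute();
    // Destructuring case: the initializer is evaluated to a place first, then
    // each binding is bound through the general pattern-matching path.
    let (a, b) = (x + 1, x + 2);
    println!("{} {} {}", x, a, b);
}
```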
- let init_datum = - unpack_datum!(bcx, expr::trans_to_lvalue(bcx, &init_expr, "let")); - if bcx.sess().asm_comments() { - add_comment(bcx, "creating zeroable ref llval"); - } - let var_scope = cleanup::var_scope(tcx, local.id); - bind_irrefutable_pat(bcx, pat, init_datum.match_input(), var_scope) - } - None => { - create_dummy_locals(bcx, pat) - } - } -} - -fn mk_binding_alloca<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>, - p_id: ast::NodeId, - name: ast::Name, - cleanup_scope: cleanup::ScopeId, - arg: A, - caller_name: &'static str, - populate: F) - -> Block<'blk, 'tcx> where - F: FnOnce(A, Block<'blk, 'tcx>, Datum<'tcx, Lvalue>) -> Block<'blk, 'tcx>, -{ - let var_ty = node_id_type(bcx, p_id); - - // Allocate memory on stack for the binding. - let llval = alloc_ty(bcx, var_ty, &bcx.name(name)); - let lvalue = Lvalue::new_with_hint(caller_name, bcx, p_id, HintKind::DontZeroJustUse); - let datum = Datum::new(llval, var_ty, lvalue); - - debug!("mk_binding_alloca cleanup_scope={:?} llval={:?} var_ty={:?}", - cleanup_scope, Value(llval), var_ty); - - // Subtle: be sure that we *populate* the memory *before* - // we schedule the cleanup. - call_lifetime_start(bcx, llval); - let bcx = populate(arg, bcx, datum); - bcx.fcx.schedule_lifetime_end(cleanup_scope, llval); - bcx.fcx.schedule_drop_mem(cleanup_scope, llval, var_ty, lvalue.dropflag_hint(bcx)); - - // Now that memory is initialized and has cleanup scheduled, - // insert datum into the local variable map. - bcx.fcx.lllocals.borrow_mut().insert(p_id, datum); - bcx -} - -/// A simple version of the pattern matching code that only handles -/// irrefutable patterns. This is used in let/argument patterns, -/// not in match statements. Unifying this code with the code above -/// sounds nice, but in practice it produces very inefficient code, -/// since the match code is so much more general. In most cases, -/// LLVM is able to optimize the code, but it causes longer compile -/// times and makes the generated code nigh impossible to read. -/// -/// # Arguments -/// - bcx: starting basic block context -/// - pat: the irrefutable pattern being matched. -/// - val: the value being matched -- must be an lvalue (by ref, with cleanup) -pub fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - pat: &hir::Pat, - val: MatchInput, - cleanup_scope: cleanup::ScopeId) - -> Block<'blk, 'tcx> { - debug!("bind_irrefutable_pat(bcx={}, pat={:?}, val={:?})", - bcx.to_str(), pat, val); - - if bcx.sess().asm_comments() { - add_comment(bcx, &format!("bind_irrefutable_pat(pat={:?})", - pat)); - } - - let _indenter = indenter(); - - let _icx = push_ctxt("match::bind_irrefutable_pat"); - let mut bcx = bcx; - let tcx = bcx.tcx(); - let ccx = bcx.ccx(); - match pat.node { - PatKind::Binding(pat_binding_mode, ref path1, ref inner) => { - // Allocate the stack slot where the value of this - // binding will live and place it into the appropriate - // map. - bcx = mk_binding_alloca(bcx, pat.id, path1.node, cleanup_scope, (), - "_match::bind_irrefutable_pat", - |(), bcx, Datum { val: llval, ty, kind: _ }| { - match pat_binding_mode { - hir::BindByValue(_) => { - // By value binding: move the value that `val` - // points at into the binding's stack slot. 
- let d = val.to_datum(ty); - d.store_to(bcx, llval) - } - - hir::BindByRef(_) => { - // By ref binding: the value of the variable - // is the pointer `val` itself or fat pointer referenced by `val` - if type_is_fat_ptr(bcx.tcx(), ty) { - expr::copy_fat_ptr(bcx, val.val, llval); - } - else { - Store(bcx, val.val, llval); - } - - bcx - } - } - }); - - if let Some(ref inner_pat) = *inner { - bcx = bind_irrefutable_pat(bcx, &inner_pat, val, cleanup_scope); - } - } - PatKind::TupleStruct(_, ref sub_pats, ddpos) => { - match bcx.tcx().expect_def(pat.id) { - Def::Variant(enum_id, var_id) => { - let repr = adt::represent_node(bcx, pat.id); - let vinfo = ccx.tcx().lookup_adt_def(enum_id).variant_with_id(var_id); - let args = extract_variant_args(bcx, - &repr, - Disr::from(vinfo.disr_val), - val); - for (i, subpat) in sub_pats.iter() - .enumerate_and_adjust(vinfo.fields.len(), ddpos) { - bcx = bind_irrefutable_pat( - bcx, - subpat, - MatchInput::from_val(args.vals[i]), - cleanup_scope); - } - } - Def::Struct(..) => { - let expected_len = match *ccx.tcx().pat_ty(&pat) { - ty::TyS{sty: ty::TyStruct(adt_def, _), ..} => { - adt_def.struct_variant().fields.len() - } - ref ty => { - span_bug!(pat.span, "tuple struct pattern unexpected type {:?}", ty); - } - }; - - let repr = adt::represent_node(bcx, pat.id); - let val = adt::MaybeSizedValue::sized(val.val); - for (i, elem) in sub_pats.iter().enumerate_and_adjust(expected_len, ddpos) { - let fldptr = adt::trans_field_ptr(bcx, &repr, val, Disr(0), i); - bcx = bind_irrefutable_pat( - bcx, - &elem, - MatchInput::from_val(fldptr), - cleanup_scope); - } - } - _ => { - // Nothing to do here. - } - } - } - PatKind::Struct(_, ref fields, _) => { - let tcx = bcx.tcx(); - let pat_ty = node_id_type(bcx, pat.id); - let pat_repr = adt::represent_type(bcx.ccx(), pat_ty); - let pat_v = VariantInfo::of_node(tcx, pat_ty, pat.id); - - let val = if type_is_sized(tcx, pat_ty) { - adt::MaybeSizedValue::sized(val.val) - } else { - let data = Load(bcx, expr::get_dataptr(bcx, val.val)); - let meta = Load(bcx, expr::get_meta(bcx, val.val)); - adt::MaybeSizedValue::unsized_(data, meta) - }; - - for f in fields { - let name = f.node.name; - let field_idx = pat_v.field_index(name); - let mut fldptr = adt::trans_field_ptr( - bcx, - &pat_repr, - val, - pat_v.discr, - field_idx); - - let fty = pat_v.fields[field_idx].1; - // If it's not sized, then construct a fat pointer instead of - // a regular one - if !type_is_sized(tcx, fty) { - let scratch = alloc_ty(bcx, fty, "__struct_field_fat_ptr"); - debug!("Creating fat pointer {:?}", Value(scratch)); - Store(bcx, fldptr, expr::get_dataptr(bcx, scratch)); - Store(bcx, val.meta, expr::get_meta(bcx, scratch)); - fldptr = scratch; - } - bcx = bind_irrefutable_pat(bcx, - &f.node.pat, - MatchInput::from_val(fldptr), - cleanup_scope); - } - } - PatKind::Tuple(ref elems, ddpos) => { - match tcx.node_id_to_type(pat.id).sty { - ty::TyTuple(ref tys) => { - let repr = adt::represent_node(bcx, pat.id); - let val = adt::MaybeSizedValue::sized(val.val); - for (i, elem) in elems.iter().enumerate_and_adjust(tys.len(), ddpos) { - let fldptr = adt::trans_field_ptr(bcx, &repr, val, Disr(0), i); - bcx = bind_irrefutable_pat( - bcx, - &elem, - MatchInput::from_val(fldptr), - cleanup_scope); - } - } - ref sty => span_bug!(pat.span, "unexpected type for tuple pattern: {:?}", sty), - } - } - PatKind::Box(ref inner) => { - let pat_ty = node_id_type(bcx, inner.id); - // Pass along DSTs as fat pointers. 
- let val = if type_is_fat_ptr(tcx, pat_ty) { - // We need to check for this, as the pattern could be binding - // a fat pointer by-value. - if let PatKind::Binding(hir::BindByRef(..),_,_) = inner.node { - val.val - } else { - Load(bcx, val.val) - } - } else if type_is_sized(tcx, pat_ty) { - Load(bcx, val.val) - } else { - val.val - }; - bcx = bind_irrefutable_pat( - bcx, &inner, MatchInput::from_val(val), cleanup_scope); - } - PatKind::Ref(ref inner, _) => { - let pat_ty = node_id_type(bcx, inner.id); - // Pass along DSTs as fat pointers. - let val = if type_is_fat_ptr(tcx, pat_ty) { - // We need to check for this, as the pattern could be binding - // a fat pointer by-value. - if let PatKind::Binding(hir::BindByRef(..),_,_) = inner.node { - val.val - } else { - Load(bcx, val.val) - } - } else if type_is_sized(tcx, pat_ty) { - Load(bcx, val.val) - } else { - val.val - }; - bcx = bind_irrefutable_pat( - bcx, - &inner, - MatchInput::from_val(val), - cleanup_scope); - } - PatKind::Vec(ref before, ref slice, ref after) => { - let pat_ty = node_id_type(bcx, pat.id); - let mut extracted = extract_vec_elems(bcx, pat_ty, before.len(), after.len(), val); - match slice { - &Some(_) => { - extracted.vals.insert( - before.len(), - bind_subslice_pat(bcx, pat.id, val, before.len(), after.len()) - ); - } - &None => () - } - bcx = before - .iter() - .chain(slice.iter()) - .chain(after.iter()) - .zip(extracted.vals) - .fold(bcx, |bcx, (inner, elem)| { - bind_irrefutable_pat( - bcx, - &inner, - MatchInput::from_val(elem), - cleanup_scope) - }); - } - PatKind::Path(..) | PatKind::Wild | - PatKind::Lit(..) | PatKind::Range(..) => () - } - return bcx; -} diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 3a7fde6a36..683ad76952 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -20,9 +20,11 @@ use cabi_arm; use cabi_aarch64; use cabi_powerpc; use cabi_powerpc64; +use cabi_s390x; use cabi_mips; +use cabi_mips64; use cabi_asmjs; -use machine::{llalign_of_min, llsize_of, llsize_of_real, llsize_of_store}; +use machine::{llalign_of_min, llsize_of, llsize_of_alloc}; use type_::Type; use type_of; @@ -34,6 +36,7 @@ use std::cmp; pub use syntax::abi::Abi; pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; +use rustc::ty::layout::Layout; #[derive(Clone, Copy, PartialEq, Debug)] enum ArgKind { @@ -99,7 +102,7 @@ impl ArgType { // Wipe old attributes, likely not valid through indirection. self.attrs = llvm::Attributes::default(); - let llarg_sz = llsize_of_real(ccx, self.ty); + let llarg_sz = llsize_of_alloc(ccx, self.ty); // For non-immediate arguments the callee gets its own copy of // the value on the stack, so there are no aliases. It's also @@ -197,7 +200,7 @@ impl ArgType { base::call_memcpy(bcx, bcx.pointercast(dst, Type::i8p(ccx)), bcx.pointercast(llscratch, Type::i8p(ccx)), - C_uint(ccx, llsize_of_store(ccx, self.ty)), + C_uint(ccx, llsize_of_alloc(ccx, self.ty)), cmp::min(llalign_of_min(ccx, self.ty), llalign_of_min(ccx, ty)) as u32); @@ -269,6 +272,7 @@ impl FnType { Vectorcall => llvm::X86_VectorCall, C => llvm::CCallConv, Win64 => llvm::X86_64_Win64, + SysV64 => llvm::X86_64_SysV, // These API constants ought to be more specific... 
Cdecl => llvm::CCallConv, @@ -298,6 +302,9 @@ impl FnType { let win_x64_gnu = target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu"; + let linux_s390x = target.target_os == "linux" + && target.arch == "s390x" + && target.target_env == "gnu"; let rust_abi = match abi { RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true, _ => false @@ -315,10 +322,17 @@ impl FnType { if ty.is_integral() { arg.signedness = Some(ty.is_signed()); } - if llsize_of_real(ccx, arg.ty) == 0 { + // Rust enum types that map onto C enums also need to follow + // the target ABI zero-/sign-extension rules. + if let Layout::CEnum { signed, .. } = *ccx.layout_of(ty) { + arg.signedness = Some(signed); + } + if llsize_of_alloc(ccx, arg.ty) == 0 { // For some forsaken reason, x86_64-pc-windows-gnu // doesn't ignore zero-sized struct arguments. - if is_return || rust_abi || !win_x64_gnu { + // The same is true for s390x-unknown-linux-gnu. + if is_return || rust_abi || + (!win_x64_gnu && !linux_s390x) { arg.ignore(); } } @@ -344,7 +358,7 @@ impl FnType { ty::TyRef(_, ty::TypeAndMut { ty, .. }) | ty::TyBox(ty) => { let llty = type_of::sizing_type_of(ccx, ty); - let llsz = llsize_of_real(ccx, llty); + let llsz = llsize_of_alloc(ccx, llty); ret.attrs.set_dereferenceable(llsz); } _ => {} @@ -413,7 +427,7 @@ impl FnType { } else { if let Some(inner) = rust_ptr_attrs(ty, &mut arg) { let llty = type_of::sizing_type_of(ccx, inner); - let llsz = llsize_of_real(ccx, llty); + let llsz = llsize_of_alloc(ccx, llty); arg.attrs.set_dereferenceable(llsz); } args.push(arg); @@ -455,8 +469,8 @@ impl FnType { return; } - let size = llsize_of_real(ccx, llty); - if size > llsize_of_real(ccx, ccx.int_type()) { + let size = llsize_of_alloc(ccx, llty); + if size > llsize_of_alloc(ccx, ccx.int_type()) { arg.make_indirect(ccx); } else if size > 0 { // We want to pass small aggregates as immediates, but using @@ -483,7 +497,9 @@ impl FnType { match &ccx.sess().target.target.arch[..] { "x86" => cabi_x86::compute_abi_info(ccx, self), - "x86_64" => if ccx.sess().target.target.options.is_like_windows { + "x86_64" => if abi == Abi::SysV64 { + cabi_x86_64::compute_abi_info(ccx, self); + } else if abi == Abi::Win64 || ccx.sess().target.target.options.is_like_windows { cabi_x86_win64::compute_abi_info(ccx, self); } else { cabi_x86_64::compute_abi_info(ccx, self); @@ -498,8 +514,10 @@ impl FnType { cabi_arm::compute_abi_info(ccx, self, flavor); }, "mips" => cabi_mips::compute_abi_info(ccx, self), + "mips64" => cabi_mips64::compute_abi_info(ccx, self), "powerpc" => cabi_powerpc::compute_abi_info(ccx, self), "powerpc64" => cabi_powerpc64::compute_abi_info(ccx, self), + "s390x" => cabi_s390x::compute_abi_info(ccx, self), "asmjs" => cabi_asmjs::compute_abi_info(ccx, self), a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a)) } diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 5a88385a46..19337ba021 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -41,116 +41,32 @@ //! used unboxed and any field can have pointers (including mutable) //! taken to it, implementing them for Rust seems difficult. 
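For context (an illustrative sketch, not compiler code): the `Layout::CEnum { signed, .. }` check above exists because a C-like enum with a signed discriminant must be sign-extended rather than zero-extended when widened for the target ABI. The observable difference:

```rust
#[repr(i8)]
#[derive(Copy, Clone)]
enum Signed {
    Neg = -1,
    Zero = 0,
}

fn main() {
    let v = Signed::Neg;
    let _ = Signed::Zero;
    // Sign-extending the i8 discriminant preserves the value...
    let sign_extended = v as i8 as i64;
    // ...while zero-extending it (going through u8) does not.
    let zero_extended = v as i8 as u8 as i64;
    println!("{} vs {}", sign_extended, zero_extended); // -1 vs 255
}
```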
-pub use self::Repr::*; use super::Disr; use std; -use std::rc::Rc; use llvm::{ValueRef, True, IntEQ, IntNE}; -use rustc::ty::subst; -use rustc::ty::{self, Ty, TyCtxt}; -use syntax::ast; +use rustc::ty::layout; +use rustc::ty::{self, Ty, AdtKind}; use syntax::attr; -use syntax::attr::IntType; -use _match; -use abi::FAT_PTR_ADDR; -use base::{self, InitAlloca}; use build::*; -use cleanup; -use cleanup::CleanupMethods; use common::*; -use datum; use debuginfo::DebugLoc; use glue; +use base; use machine; use monomorphize; use type_::Type; use type_of; use value::Value; -type Hint = attr::ReprAttr; - -// Representation of the context surrounding an unsized type. I want -// to be able to track the drop flags that are injected by trans. -#[derive(Clone, Copy, PartialEq, Debug)] -pub struct TypeContext { - prefix: Type, - needs_drop_flag: bool, -} - -impl TypeContext { - pub fn prefix(&self) -> Type { self.prefix } - pub fn needs_drop_flag(&self) -> bool { self.needs_drop_flag } - - fn direct(t: Type) -> TypeContext { - TypeContext { prefix: t, needs_drop_flag: false } - } - fn may_need_drop_flag(t: Type, needs_drop_flag: bool) -> TypeContext { - TypeContext { prefix: t, needs_drop_flag: needs_drop_flag } - } -} - -/// Representations. -#[derive(Eq, PartialEq, Debug)] -pub enum Repr<'tcx> { - /// C-like enums; basically an int. - CEnum(IntType, Disr, Disr), // discriminant range (signedness based on the IntType) - /// Single-case variants, and structs/tuples/records. - /// - /// Structs with destructors need a dynamic destroyedness flag to - /// avoid running the destructor too many times; this is included - /// in the `Struct` if present. - /// (The flag if nonzero, represents the initialization value to use; - /// if zero, then use no flag at all.) - Univariant(Struct<'tcx>, u8), - /// General-case enums: for each case there is a struct, and they - /// all start with a field for the discriminant. - /// - /// Types with destructors need a dynamic destroyedness flag to - /// avoid running the destructor too many times; the last argument - /// indicates whether such a flag is present. - /// (The flag, if nonzero, represents the initialization value to use; - /// if zero, then use no flag at all.) - General(IntType, Vec>, u8), - /// Two cases distinguished by a nullable pointer: the case with discriminant - /// `nndiscr` must have single field which is known to be nonnull due to its type. - /// The other case is known to be zero sized. Hence we represent the enum - /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant, - /// otherwise it indicates the other case. - RawNullablePointer { - nndiscr: Disr, - nnty: Ty<'tcx>, - nullfields: Vec> - }, - /// Two cases distinguished by a nullable pointer: the case with discriminant - /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th - /// field is known to be nonnull due to its type; if that field is null, then - /// it represents the other case, which is inhabited by at most one value - /// (and all other fields are undefined/unused). - /// - /// For example, `std::option::Option` instantiated at a safe pointer type - /// is represented such that `None` is a null pointer and `Some` is the - /// identity function. - StructWrappedNullablePointer { - nonnull: Struct<'tcx>, - nndiscr: Disr, - discrfield: DiscrField, - nullfields: Vec>, - } +#[derive(Copy, Clone, PartialEq)] +pub enum BranchKind { + Switch, + Single } -/// For structs, and struct-like parts of anything fancier. 
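A rough illustration of the two most common cases described by the removed `Repr` doc comments (sizes are what 1.13-era and current rustc produce in practice, not language guarantees):

```rust
use std::mem::size_of;

// "CEnum": a fieldless enum is just an integer discriminant.
enum CLike {
    A,
    B,
    C,
}

// "General": a data-carrying enum is a discriminant followed by enough
// space for the largest variant.
enum General {
    Small(u8),
    Big(u64),
}

fn main() {
    let _ = (CLike::A, CLike::B, CLike::C, General::Small(1), General::Big(2));
    println!("CLike:   {} byte(s)", size_of::<CLike>());   // 1: just the tag
    println!("General: {} byte(s)", size_of::<General>()); // 16: tag + padding + u64 payload
}
```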
-#[derive(Eq, PartialEq, Debug)] -pub struct Struct<'tcx> { - // If the struct is DST, then the size and alignment do not take into - // account the unsized fields of the struct. - pub size: u64, - pub align: u32, - pub sized: bool, - pub packed: bool, - pub fields: Vec>, -} +type Hint = attr::ReprAttr; #[derive(Copy, Clone)] pub struct MaybeSizedValue { @@ -178,540 +94,33 @@ impl MaybeSizedValue { } } -/// Convenience for `represent_type`. There should probably be more or -/// these, for places in trans where the `Ty` isn't directly -/// available. -pub fn represent_node<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - node: ast::NodeId) -> Rc> { - represent_type(bcx.ccx(), node_id_type(bcx, node)) -} - -/// Decides how to represent a given type. -pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>) - -> Rc> { - debug!("Representing: {}", t); - if let Some(repr) = cx.adt_reprs().borrow().get(&t) { - return repr.clone(); - } - - let repr = Rc::new(represent_type_uncached(cx, t)); - debug!("Represented as: {:?}", repr); - cx.adt_reprs().borrow_mut().insert(t, repr.clone()); - repr -} - -const fn repeat_u8_as_u32(val: u8) -> u32 { - (val as u32) << 24 | (val as u32) << 16 | (val as u32) << 8 | val as u32 -} - -const fn repeat_u8_as_u64(val: u8) -> u64 { - (repeat_u8_as_u32(val) as u64) << 32 | repeat_u8_as_u32(val) as u64 -} - -/// `DTOR_NEEDED_HINT` is a stack-local hint that just means -/// "we do not know whether the destructor has run or not; check the -/// drop-flag embedded in the value itself." -pub const DTOR_NEEDED_HINT: u8 = 0x3d; - -/// `DTOR_MOVED_HINT` is a stack-local hint that means "this value has -/// definitely been moved; you do not need to run its destructor." -/// -/// (However, for now, such values may still end up being explicitly -/// zeroed by the generated code; this is the distinction between -/// `datum::DropFlagInfo::ZeroAndMaintain` versus -/// `datum::DropFlagInfo::DontZeroJustUse`.) -pub const DTOR_MOVED_HINT: u8 = 0x2d; - -pub const DTOR_NEEDED: u8 = 0xd4; -#[allow(dead_code)] -pub const DTOR_NEEDED_U64: u64 = repeat_u8_as_u64(DTOR_NEEDED); - -pub const DTOR_DONE: u8 = 0x1d; -#[allow(dead_code)] -pub const DTOR_DONE_U64: u64 = repeat_u8_as_u64(DTOR_DONE); - -fn dtor_to_init_u8(dtor: bool) -> u8 { - if dtor { DTOR_NEEDED } else { 0 } -} - -pub trait GetDtorType<'tcx> { fn dtor_type(self) -> Ty<'tcx>; } -impl<'a, 'tcx> GetDtorType<'tcx> for TyCtxt<'a, 'tcx, 'tcx> { - fn dtor_type(self) -> Ty<'tcx> { self.types.u8 } -} - -fn dtor_active(flag: u8) -> bool { - flag != 0 -} - -fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>) -> Repr<'tcx> { +/// Given an enum, struct, closure, or tuple, extracts fields. +/// Treats closures as a struct with one variant. +/// `empty_if_no_variants` is a switch to deal with empty enums. +/// If true, `variant_index` is disregarded and an empty Vec returned in this case. +fn compute_fields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, + variant_index: usize, + empty_if_no_variants: bool) -> Vec> { match t.sty { - ty::TyTuple(ref elems) => { - Univariant(mk_struct(cx, &elems[..], false, t), 0) - } - ty::TyStruct(def, substs) => { - let mut ftys = def.struct_variant().fields.iter().map(|field| { - monomorphize::field_ty(cx.tcx(), substs, field) - }).collect::>(); - let packed = cx.tcx().lookup_packed(def.did); - // FIXME(16758) don't add a drop flag to unsized structs, as it - // won't actually be in the location we say it is because it'll be after - // the unsized field. 
Several other pieces of code assume that the unsized - // field is definitely the last one. - let dtor = def.dtor_kind().has_drop_flag() && type_is_sized(cx.tcx(), t); - if dtor { - ftys.push(cx.tcx().dtor_type()); - } - - Univariant(mk_struct(cx, &ftys[..], packed, t), dtor_to_init_u8(dtor)) - } - ty::TyClosure(_, ref substs) => { - Univariant(mk_struct(cx, &substs.upvar_tys, false, t), 0) - } - ty::TyEnum(def, substs) => { - let cases = get_cases(cx.tcx(), def, substs); - let hint = *cx.tcx().lookup_repr_hints(def.did).get(0) - .unwrap_or(&attr::ReprAny); - - let dtor = def.dtor_kind().has_drop_flag(); - - if cases.is_empty() { - // Uninhabitable; represent as unit - // (Typechecking will reject discriminant-sizing attrs.) - assert_eq!(hint, attr::ReprAny); - let ftys = if dtor { vec!(cx.tcx().dtor_type()) } else { vec!() }; - return Univariant(mk_struct(cx, &ftys[..], false, t), - dtor_to_init_u8(dtor)); - } - - if !dtor && cases.iter().all(|c| c.tys.is_empty()) { - // All bodies empty -> intlike - let discrs: Vec<_> = cases.iter().map(|c| Disr::from(c.discr)).collect(); - let bounds = IntBounds { - ulo: discrs.iter().min().unwrap().0, - uhi: discrs.iter().max().unwrap().0, - slo: discrs.iter().map(|n| n.0 as i64).min().unwrap(), - shi: discrs.iter().map(|n| n.0 as i64).max().unwrap() - }; - return mk_cenum(cx, hint, &bounds); - } - - // Since there's at least one - // non-empty body, explicit discriminants should have - // been rejected by a checker before this point. - if !cases.iter().enumerate().all(|(i,c)| c.discr == Disr::from(i)) { - bug!("non-C-like enum {} with specified discriminants", - cx.tcx().item_path_str(def.did)); - } - - if cases.len() == 1 && hint == attr::ReprAny { - // Equivalent to a struct/tuple/newtype. - let mut ftys = cases[0].tys.clone(); - if dtor { ftys.push(cx.tcx().dtor_type()); } - return Univariant(mk_struct(cx, &ftys[..], false, t), - dtor_to_init_u8(dtor)); - } - - if !dtor && cases.len() == 2 && hint == attr::ReprAny { - // Nullable pointer optimization - let mut discr = 0; - while discr < 2 { - if cases[1 - discr].is_zerolen(cx, t) { - let st = mk_struct(cx, &cases[discr].tys, - false, t); - match cases[discr].find_ptr(cx) { - Some(ref df) if df.len() == 1 && st.fields.len() == 1 => { - return RawNullablePointer { - nndiscr: Disr::from(discr), - nnty: st.fields[0], - nullfields: cases[1 - discr].tys.clone() - }; - } - Some(mut discrfield) => { - discrfield.push(0); - discrfield.reverse(); - return StructWrappedNullablePointer { - nndiscr: Disr::from(discr), - nonnull: st, - discrfield: discrfield, - nullfields: cases[1 - discr].tys.clone() - }; - } - None => {} - } - } - discr += 1; - } - } - - // The general case. - assert!((cases.len() - 1) as i64 >= 0); - let bounds = IntBounds { ulo: 0, uhi: (cases.len() - 1) as u64, - slo: 0, shi: (cases.len() - 1) as i64 }; - let min_ity = range_to_inttype(cx, hint, &bounds); - - // Create the set of structs that represent each variant - // Use the minimum integer type we figured out above - let fields : Vec<_> = cases.iter().map(|c| { - let mut ftys = vec!(ty_of_inttype(cx.tcx(), min_ity)); - ftys.extend_from_slice(&c.tys); - if dtor { ftys.push(cx.tcx().dtor_type()); } - mk_struct(cx, &ftys, false, t) - }).collect(); - - - // Check to see if we should use a different type for the - // discriminant. If the overall alignment of the type is - // the same as the first field in each variant, we can safely use - // an alignment-sized type. 
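A quick, observable consequence of the nullable-pointer optimization discussed above (a sketch, not compiler code): wrapping a non-nullable pointer in `Option` costs nothing, because the null value itself encodes `None` and no separate discriminant is stored.

```rust
use std::mem::size_of;

fn main() {
    // RawNullablePointer: the whole enum is just the pointer.
    assert_eq!(size_of::<Option<&u32>>(), size_of::<&u32>());
    assert_eq!(size_of::<Option<Box<u8>>>(), size_of::<Box<u8>>());
    // StructWrappedNullablePointer: the null check happens on a pointer field
    // inside the non-null variant's payload.
    assert_eq!(
        size_of::<Option<(usize, &u32)>>(),
        size_of::<(usize, &u32)>()
    );
    println!("None is encoded as a null pointer; no extra tag is stored");
}
```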
- // We increase the size of the discriminant to avoid LLVM copying - // padding when it doesn't need to. This normally causes unaligned - // load/stores and excessive memcpy/memset operations. By using a - // bigger integer size, LLVM can be sure about it's contents and - // won't be so conservative. - // This check is needed to avoid increasing the size of types when - // the alignment of the first field is smaller than the overall - // alignment of the type. - let (_, align) = union_size_and_align(&fields); - let mut use_align = true; - for st in &fields { - // Get the first non-zero-sized field - let field = st.fields.iter().skip(1).filter(|ty| { - let t = type_of::sizing_type_of(cx, **ty); - machine::llsize_of_real(cx, t) != 0 || - // This case is only relevant for zero-sized types with large alignment - machine::llalign_of_min(cx, t) != 1 - }).next(); - - if let Some(field) = field { - let field_align = type_of::align_of(cx, *field); - if field_align != align { - use_align = false; - break; - } - } - } - - // If the alignment is smaller than the chosen discriminant size, don't use the - // alignment as the final size. - let min_ty = ll_inttype(&cx, min_ity); - let min_size = machine::llsize_of_real(cx, min_ty); - if (align as u64) < min_size { - use_align = false; - } - - let ity = if use_align { - // Use the overall alignment - match align { - 1 => attr::UnsignedInt(ast::UintTy::U8), - 2 => attr::UnsignedInt(ast::UintTy::U16), - 4 => attr::UnsignedInt(ast::UintTy::U32), - 8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 => - attr::UnsignedInt(ast::UintTy::U64), - _ => min_ity // use min_ity as a fallback - } - } else { - min_ity - }; - - let fields : Vec<_> = cases.iter().map(|c| { - let mut ftys = vec!(ty_of_inttype(cx.tcx(), ity)); - ftys.extend_from_slice(&c.tys); - if dtor { ftys.push(cx.tcx().dtor_type()); } - mk_struct(cx, &ftys[..], false, t) - }).collect(); - - ensure_enum_fits_in_address_space(cx, &fields[..], t); - - General(ity, fields, dtor_to_init_u8(dtor)) - } - _ => bug!("adt::represent_type called on non-ADT type: {}", t) - } -} - -// this should probably all be in ty -struct Case<'tcx> { - discr: Disr, - tys: Vec> -} - -/// This represents the (GEP) indices to follow to get to the discriminant field -pub type DiscrField = Vec; - -fn find_discr_field_candidate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>, - mut path: DiscrField) - -> Option { - match ty.sty { - // Fat &T/&mut T/Box i.e. T is [T], str, or Trait - ty::TyRef(_, ty::TypeAndMut { ty, .. }) | ty::TyBox(ty) if !type_is_sized(tcx, ty) => { - path.push(FAT_PTR_ADDR); - Some(path) + ty::TyAdt(ref def, _) if def.variants.len() == 0 && empty_if_no_variants => { + Vec::default() }, - - // Regular thin pointer: &T/&mut T/Box - ty::TyRef(..) | ty::TyBox(..) => Some(path), - - // Function pointer: `fn() -> i32` - ty::TyFnPtr(_) => Some(path), - - // Is this the NonZero lang item wrapping a pointer or integer type? - ty::TyStruct(def, substs) if Some(def.did) == tcx.lang_items.non_zero() => { - let nonzero_fields = &def.struct_variant().fields; - assert_eq!(nonzero_fields.len(), 1); - let field_ty = monomorphize::field_ty(tcx, substs, &nonzero_fields[0]); - match field_ty.sty { - ty::TyRawPtr(ty::TypeAndMut { ty, .. }) if !type_is_sized(tcx, ty) => { - path.extend_from_slice(&[0, FAT_PTR_ADDR]); - Some(path) - }, - ty::TyRawPtr(..) | ty::TyInt(..) | ty::TyUint(..) 
=> { - path.push(0); - Some(path) - }, - _ => None - } - }, - - // Perhaps one of the fields of this struct is non-zero - // let's recurse and find out - ty::TyStruct(def, substs) => { - for (j, field) in def.struct_variant().fields.iter().enumerate() { - let field_ty = monomorphize::field_ty(tcx, substs, field); - if let Some(mut fpath) = find_discr_field_candidate(tcx, field_ty, path.clone()) { - fpath.push(j); - return Some(fpath); - } - } - None + ty::TyAdt(ref def, ref substs) => { + def.variants[variant_index].fields.iter().map(|f| { + monomorphize::field_ty(cx.tcx(), substs, f) + }).collect::>() }, - - // Perhaps one of the upvars of this struct is non-zero - // Let's recurse and find out! - ty::TyClosure(_, ref substs) => { - for (j, &ty) in substs.upvar_tys.iter().enumerate() { - if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) { - fpath.push(j); - return Some(fpath); - } - } - None + ty::TyTuple(fields) => fields.to_vec(), + ty::TyClosure(_, substs) => { + if variant_index > 0 { bug!("{} is a closure, which only has one variant", t);} + substs.upvar_tys.to_vec() }, - - // Can we use one of the fields in this tuple? - ty::TyTuple(ref tys) => { - for (j, &ty) in tys.iter().enumerate() { - if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) { - fpath.push(j); - return Some(fpath); - } - } - None - }, - - // Is this a fixed-size array of something non-zero - // with at least one element? - ty::TyArray(ety, d) if d > 0 => { - if let Some(mut vpath) = find_discr_field_candidate(tcx, ety, path) { - vpath.push(0); - Some(vpath) - } else { - None - } - }, - - // Anything else is not a pointer - _ => None - } -} - -impl<'tcx> Case<'tcx> { - fn is_zerolen<'a>(&self, cx: &CrateContext<'a, 'tcx>, scapegoat: Ty<'tcx>) -> bool { - mk_struct(cx, &self.tys, false, scapegoat).size == 0 - } - - fn find_ptr<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Option { - for (i, &ty) in self.tys.iter().enumerate() { - if let Some(mut path) = find_discr_field_candidate(cx.tcx(), ty, vec![]) { - path.push(i); - return Some(path); - } - } - None - } -} - -fn get_cases<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - adt: ty::AdtDef<'tcx>, - substs: &subst::Substs<'tcx>) - -> Vec> { - adt.variants.iter().map(|vi| { - let field_tys = vi.fields.iter().map(|field| { - monomorphize::field_ty(tcx, substs, field) - }).collect(); - Case { discr: Disr::from(vi.disr_val), tys: field_tys } - }).collect() -} - -fn mk_struct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - tys: &[Ty<'tcx>], packed: bool, - scapegoat: Ty<'tcx>) - -> Struct<'tcx> { - let sized = tys.iter().all(|&ty| type_is_sized(cx.tcx(), ty)); - let lltys : Vec = if sized { - tys.iter().map(|&ty| type_of::sizing_type_of(cx, ty)).collect() - } else { - tys.iter().filter(|&ty| type_is_sized(cx.tcx(), *ty)) - .map(|&ty| type_of::sizing_type_of(cx, ty)).collect() - }; - - ensure_struct_fits_in_address_space(cx, &lltys[..], packed, scapegoat); - - let llty_rec = Type::struct_(cx, &lltys[..], packed); - Struct { - size: machine::llsize_of_alloc(cx, llty_rec), - align: machine::llalign_of_min(cx, llty_rec), - sized: sized, - packed: packed, - fields: tys.to_vec(), - } -} - -#[derive(Debug)] -struct IntBounds { - slo: i64, - shi: i64, - ulo: u64, - uhi: u64 -} - -fn mk_cenum<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - hint: Hint, bounds: &IntBounds) - -> Repr<'tcx> { - let it = range_to_inttype(cx, hint, bounds); - match it { - attr::SignedInt(_) => CEnum(it, Disr(bounds.slo as u64), Disr(bounds.shi as u64)), - attr::UnsignedInt(_) => 
CEnum(it, Disr(bounds.ulo), Disr(bounds.uhi)) - } -} - -fn range_to_inttype(cx: &CrateContext, hint: Hint, bounds: &IntBounds) -> IntType { - debug!("range_to_inttype: {:?} {:?}", hint, bounds); - // Lists of sizes to try. u64 is always allowed as a fallback. - #[allow(non_upper_case_globals)] - const choose_shortest: &'static [IntType] = &[ - attr::UnsignedInt(ast::UintTy::U8), attr::SignedInt(ast::IntTy::I8), - attr::UnsignedInt(ast::UintTy::U16), attr::SignedInt(ast::IntTy::I16), - attr::UnsignedInt(ast::UintTy::U32), attr::SignedInt(ast::IntTy::I32)]; - #[allow(non_upper_case_globals)] - const at_least_32: &'static [IntType] = &[ - attr::UnsignedInt(ast::UintTy::U32), attr::SignedInt(ast::IntTy::I32)]; - - let attempts; - match hint { - attr::ReprInt(span, ity) => { - if !bounds_usable(cx, ity, bounds) { - span_bug!(span, "representation hint insufficient for discriminant range") - } - return ity; - } - attr::ReprExtern => { - attempts = match &cx.sess().target.target.arch[..] { - // WARNING: the ARM EABI has two variants; the one corresponding to `at_least_32` - // appears to be used on Linux and NetBSD, but some systems may use the variant - // corresponding to `choose_shortest`. However, we don't run on those yet...? - "arm" => at_least_32, - _ => at_least_32, - } - } - attr::ReprAny => { - attempts = choose_shortest; - }, - attr::ReprPacked => { - bug!("range_to_inttype: found ReprPacked on an enum"); - } - attr::ReprSimd => { - bug!("range_to_inttype: found ReprSimd on an enum"); - } - } - for &ity in attempts { - if bounds_usable(cx, ity, bounds) { - return ity; - } - } - return attr::UnsignedInt(ast::UintTy::U64); -} - -pub fn ll_inttype(cx: &CrateContext, ity: IntType) -> Type { - match ity { - attr::SignedInt(t) => Type::int_from_ty(cx, t), - attr::UnsignedInt(t) => Type::uint_from_ty(cx, t) - } -} - -fn bounds_usable(cx: &CrateContext, ity: IntType, bounds: &IntBounds) -> bool { - debug!("bounds_usable: {:?} {:?}", ity, bounds); - match ity { - attr::SignedInt(_) => { - let lllo = C_integral(ll_inttype(cx, ity), bounds.slo as u64, true); - let llhi = C_integral(ll_inttype(cx, ity), bounds.shi as u64, true); - bounds.slo == const_to_int(lllo) as i64 && bounds.shi == const_to_int(llhi) as i64 - } - attr::UnsignedInt(_) => { - let lllo = C_integral(ll_inttype(cx, ity), bounds.ulo, false); - let llhi = C_integral(ll_inttype(cx, ity), bounds.uhi, false); - bounds.ulo == const_to_uint(lllo) as u64 && bounds.uhi == const_to_uint(llhi) as u64 - } - } -} - -pub fn ty_of_inttype<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ity: IntType) -> Ty<'tcx> { - match ity { - attr::SignedInt(t) => tcx.mk_mach_int(t), - attr::UnsignedInt(t) => tcx.mk_mach_uint(t) - } -} - -// LLVM doesn't like types that don't fit in the address space -fn ensure_struct_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - fields: &[Type], - packed: bool, - scapegoat: Ty<'tcx>) { - let mut offset = 0; - for &llty in fields { - // Invariant: offset < ccx.obj_size_bound() <= 1<<61 - if !packed { - let type_align = machine::llalign_of_min(ccx, llty); - offset = roundup(offset, type_align); - } - // type_align is a power-of-2, so still offset < ccx.obj_size_bound() - // llsize_of_alloc(ccx, llty) is also less than ccx.obj_size_bound() - // so the sum is less than 1<<62 (and therefore can't overflow). 
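For a feel of what `range_to_inttype`'s shortest-fit selection produces (a sketch under the assumption that rustc keeps choosing the smallest covering integer, which the language does not strictly guarantee):

```rust
use std::mem::size_of;

enum TwoVariants {
    A,
    B,
}

enum WideDiscriminant {
    V0 = 0,
    V300 = 300,
}

fn main() {
    let _ = (TwoVariants::A, TwoVariants::B, WideDiscriminant::V0, WideDiscriminant::V300);
    // Discriminants 0..=1 fit in an 8-bit integer.
    println!("TwoVariants:      {} byte(s)", size_of::<TwoVariants>());
    // A discriminant of 300 needs at least 16 bits.
    println!("WideDiscriminant: {} byte(s)", size_of::<WideDiscriminant>());
}
```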
- offset += machine::llsize_of_alloc(ccx, llty); - - if offset >= ccx.obj_size_bound() { - ccx.report_overbig_object(scapegoat); - } - } -} - -fn union_size_and_align(sts: &[Struct]) -> (machine::llsize, machine::llalign) { - let size = sts.iter().map(|st| st.size).max().unwrap(); - let align = sts.iter().map(|st| st.align).max().unwrap(); - (roundup(size, align), align) -} - -fn ensure_enum_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - fields: &[Struct], - scapegoat: Ty<'tcx>) { - let (total_size, _) = union_size_and_align(fields); - - if total_size >= ccx.obj_size_bound() { - ccx.report_overbig_object(scapegoat); + _ => bug!("{} is not a type that can have fields.", t) } } +/// This represents the (GEP) indices to follow to get to the discriminant field +pub type DiscrField = Vec; /// LLVM-level types are a little complicated. /// @@ -721,10 +130,8 @@ fn ensure_enum_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, /// For nominal types, in some cases, we need to use LLVM named structs /// and fill in the actual contents in a second pass to prevent /// unbounded recursion; see also the comments in `trans::type_of`. -pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type { - let c = generic_type_of(cx, r, None, false, false, false); - assert!(!c.needs_drop_flag); - c.prefix +pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { + generic_type_of(cx, t, None, false, false) } @@ -732,79 +139,108 @@ pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type { // this out, but if you call this on an unsized type without realising it, you // are going to get the wrong type (it will not include the unsized parts of it). pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - r: &Repr<'tcx>, dst: bool) -> Type { - let c = generic_type_of(cx, r, None, true, dst, false); - assert!(!c.needs_drop_flag); - c.prefix -} -pub fn sizing_type_context_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - r: &Repr<'tcx>, dst: bool) -> TypeContext { - generic_type_of(cx, r, None, true, dst, true) + t: Ty<'tcx>, dst: bool) -> Type { + generic_type_of(cx, t, None, true, dst) } + pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - r: &Repr<'tcx>, name: &str) -> Type { - let c = generic_type_of(cx, r, Some(name), false, false, false); - assert!(!c.needs_drop_flag); - c.prefix + t: Ty<'tcx>, name: &str) -> Type { + generic_type_of(cx, t, Some(name), false, false) } + pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - r: &Repr<'tcx>, llty: &mut Type) { - match *r { - CEnum(..) | General(..) | RawNullablePointer { .. } => { } - Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } => - llty.set_struct_body(&struct_llfields(cx, st, false, false), - st.packed) + t: Ty<'tcx>, llty: &mut Type) { + let l = cx.layout_of(t); + debug!("finish_type_of: {} with layout {:#?}", t, l); + match *l { + layout::CEnum { .. } | layout::General { .. } + | layout::UntaggedUnion { .. } | layout::RawNullablePointer { .. } => { } + layout::Univariant { ..} + | layout::StructWrappedNullablePointer { .. } => { + let (nonnull_variant, packed) = match *l { + layout::Univariant { ref variant, .. } => (0, variant.packed), + layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. 
} => + (nndiscr, nonnull.packed), + _ => unreachable!() + }; + let fields = compute_fields(cx, t, nonnull_variant as usize, true); + llty.set_struct_body(&struct_llfields(cx, &fields, false, false), + packed) + }, + _ => bug!("This function cannot handle {} with layout {:#?}", t, l) } } fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - r: &Repr<'tcx>, + t: Ty<'tcx>, name: Option<&str>, sizing: bool, - dst: bool, - delay_drop_flag: bool) -> TypeContext { - debug!("adt::generic_type_of r: {:?} name: {:?} sizing: {} dst: {} delay_drop_flag: {}", - r, name, sizing, dst, delay_drop_flag); - match *r { - CEnum(ity, _, _) => TypeContext::direct(ll_inttype(cx, ity)), - RawNullablePointer { nnty, .. } => - TypeContext::direct(type_of::sizing_type_of(cx, nnty)), - StructWrappedNullablePointer { nonnull: ref st, .. } => { + dst: bool) -> Type { + let l = cx.layout_of(t); + debug!("adt::generic_type_of t: {:?} name: {:?} sizing: {} dst: {}", + t, name, sizing, dst); + match *l { + layout::CEnum { discr, .. } => Type::from_integer(cx, discr), + layout::RawNullablePointer { nndiscr, .. } => { + let (def, substs) = match t.sty { + ty::TyAdt(d, s) => (d, s), + _ => bug!("{} is not an ADT", t) + }; + let nnty = monomorphize::field_ty(cx.tcx(), substs, + &def.variants[nndiscr as usize].fields[0]); + type_of::sizing_type_of(cx, nnty) + } + layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => { + let fields = compute_fields(cx, t, nndiscr as usize, false); match name { None => { - TypeContext::direct( - Type::struct_(cx, &struct_llfields(cx, st, sizing, dst), - st.packed)) + Type::struct_(cx, &struct_llfields(cx, &fields, sizing, dst), + nonnull.packed) } Some(name) => { assert_eq!(sizing, false); - TypeContext::direct(Type::named_struct(cx, name)) + Type::named_struct(cx, name) } } } - Univariant(ref st, dtor_needed) => { - let dtor_needed = dtor_needed != 0; + layout::Univariant { ref variant, .. } => { + // Note that this case also handles empty enums. + // Thus the true as the final parameter here. + let fields = compute_fields(cx, t, 0, true); match name { None => { - let mut fields = struct_llfields(cx, st, sizing, dst); - if delay_drop_flag && dtor_needed { - fields.pop(); - } - TypeContext::may_need_drop_flag( - Type::struct_(cx, &fields, - st.packed), - delay_drop_flag && dtor_needed) + let fields = struct_llfields(cx, &fields, sizing, dst); + Type::struct_(cx, &fields, variant.packed) } Some(name) => { // Hypothesis: named_struct's can never need a // drop flag. (... needs validation.) assert_eq!(sizing, false); - TypeContext::direct(Type::named_struct(cx, name)) + Type::named_struct(cx, name) + } + } + } + layout::Vector { element, count } => { + let elem_ty = Type::from_primitive(cx, element); + Type::vector(&elem_ty, count) + } + layout::UntaggedUnion { ref variants, .. }=> { + // Use alignment-sized ints to fill all the union storage. + let size = variants.stride().bytes(); + let align = variants.align.abi(); + let fill = union_fill(cx, size, align); + match name { + None => { + Type::struct_(cx, &[fill], variants.packed) + } + Some(name) => { + let mut llty = Type::named_struct(cx, name); + llty.set_struct_body(&[fill], variants.packed); + llty } } } - General(ity, ref sts, dtor_needed) => { - let dtor_needed = dtor_needed != 0; + layout::General { discr, size, align, .. 
} => { // We need a representation that has: // * The alignment of the most-aligned field // * The size of the largest variant (rounded up to that alignment) @@ -817,144 +253,151 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // of the size. // // FIXME #10604: this breaks when vector types are present. - let (size, align) = union_size_and_align(&sts[..]); - let align_s = align as u64; - let discr_ty = ll_inttype(cx, ity); - let discr_size = machine::llsize_of_alloc(cx, discr_ty); - let padded_discr_size = roundup(discr_size, align); - assert_eq!(size % align_s, 0); // Ensure division in align_units comes out evenly - let align_units = (size - padded_discr_size) / align_s; - let fill_ty = match align_s { - 1 => Type::array(&Type::i8(cx), align_units), - 2 => Type::array(&Type::i16(cx), align_units), - 4 => Type::array(&Type::i32(cx), align_units), - 8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 => - Type::array(&Type::i64(cx), align_units), - a if a.count_ones() == 1 => Type::array(&Type::vector(&Type::i32(cx), a / 4), - align_units), - _ => bug!("unsupported enum alignment: {}", align) - }; - assert_eq!(machine::llalign_of_min(cx, fill_ty), align); + let size = size.bytes(); + let align = align.abi(); + let discr_ty = Type::from_integer(cx, discr); + let discr_size = discr.size().bytes(); + let padded_discr_size = roundup(discr_size, align as u32); + let variant_part_size = size-padded_discr_size; + let variant_fill = union_fill(cx, variant_part_size, align); + + assert_eq!(machine::llalign_of_min(cx, variant_fill), align as u32); assert_eq!(padded_discr_size % discr_size, 0); // Ensure discr_ty can fill pad evenly - let mut fields: Vec = + let fields: Vec = [discr_ty, Type::array(&discr_ty, (padded_discr_size - discr_size)/discr_size), - fill_ty].iter().cloned().collect(); - if delay_drop_flag && dtor_needed { - fields.pop(); - } + variant_fill].iter().cloned().collect(); match name { None => { - TypeContext::may_need_drop_flag( - Type::struct_(cx, &fields[..], false), - delay_drop_flag && dtor_needed) + Type::struct_(cx, &fields[..], false) } Some(name) => { let mut llty = Type::named_struct(cx, name); llty.set_struct_body(&fields[..], false); - TypeContext::may_need_drop_flag( - llty, - delay_drop_flag && dtor_needed) + llty } } } + _ => bug!("Unsupported type {} represented as {:#?}", t, l) } } -fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, st: &Struct<'tcx>, +fn union_fill(cx: &CrateContext, size: u64, align: u64) -> Type { + assert_eq!(size%align, 0); + assert_eq!(align.count_ones(), 1, "Alignment must be a power fof 2. 
Got {}", align); + let align_units = size/align; + let dl = &cx.tcx().data_layout; + let layout_align = layout::Align::from_bytes(align, align).unwrap(); + if let Some(ity) = layout::Integer::for_abi_align(dl, layout_align) { + Type::array(&Type::from_integer(cx, ity), align_units) + } else { + Type::array(&Type::vector(&Type::i32(cx), align/4), + align_units) + } +} + + +fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec>, sizing: bool, dst: bool) -> Vec { if sizing { - st.fields.iter().filter(|&ty| !dst || type_is_sized(cx.tcx(), *ty)) + fields.iter().filter(|&ty| !dst || type_is_sized(cx.tcx(), *ty)) .map(|&ty| type_of::sizing_type_of(cx, ty)).collect() } else { - st.fields.iter().map(|&ty| type_of::in_memory_type_of(cx, ty)).collect() + fields.iter().map(|&ty| type_of::in_memory_type_of(cx, ty)).collect() } } /// Obtain a representation of the discriminant sufficient to translate /// destructuring; this may or may not involve the actual discriminant. -/// -/// This should ideally be less tightly tied to `_match`. pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - r: &Repr<'tcx>, + t: Ty<'tcx>, scrutinee: ValueRef, range_assert: bool) - -> (_match::BranchKind, Option) { - match *r { - CEnum(..) | General(..) | - RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => { - (_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None, - range_assert))) - } - Univariant(..) => { + -> (BranchKind, Option) { + let l = bcx.ccx().layout_of(t); + match *l { + layout::CEnum { .. } | layout::General { .. } | + layout::RawNullablePointer { .. } | layout::StructWrappedNullablePointer { .. } => { + (BranchKind::Switch, Some(trans_get_discr(bcx, t, scrutinee, None, range_assert))) + } + layout::Univariant { .. } | layout::UntaggedUnion { .. } => { // N.B.: Univariant means <= 1 enum variants (*not* == 1 variants). - (_match::Single, None) - } + (BranchKind::Single, None) + }, + _ => bug!("{} is not an enum.", t) } } -pub fn is_discr_signed<'tcx>(r: &Repr<'tcx>) -> bool { - match *r { - CEnum(ity, _, _) => ity.is_signed(), - General(ity, _, _) => ity.is_signed(), - Univariant(..) => false, - RawNullablePointer { .. } => false, - StructWrappedNullablePointer { .. } => false, +pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool { + match *l { + layout::CEnum { signed, .. }=> signed, + _ => false, } } /// Obtain the actual discriminant of a value. -pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, +pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, scrutinee: ValueRef, cast_to: Option, range_assert: bool) -> ValueRef { - debug!("trans_get_discr r: {:?}", r); - let val = match *r { - CEnum(ity, min, max) => { - load_discr(bcx, ity, scrutinee, min, max, range_assert) + let (def, substs) = match t.sty { + ty::TyAdt(ref def, substs) if def.adt_kind() == AdtKind::Enum => (def, substs), + _ => bug!("{} is not an enum", t) + }; + + debug!("trans_get_discr t: {:?}", t); + let l = bcx.ccx().layout_of(t); + + let val = match *l { + layout::CEnum { discr, min, max, .. } => { + load_discr(bcx, discr, scrutinee, min, max, range_assert) } - General(ity, ref cases, _) => { + layout::General { discr, .. } => { let ptr = StructGEP(bcx, scrutinee, 0); - load_discr(bcx, ity, ptr, Disr(0), Disr(cases.len() as u64 - 1), + load_discr(bcx, discr, ptr, 0, def.variants.len() as u64 - 1, range_assert) } - Univariant(..) => C_u8(bcx.ccx(), 0), - RawNullablePointer { nndiscr, nnty, .. 
} => { - let cmp = if nndiscr == Disr(0) { IntEQ } else { IntNE }; - let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty); + layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx(), 0), + layout::RawNullablePointer { nndiscr, .. } => { + let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; + let llptrty = type_of::sizing_type_of(bcx.ccx(), + monomorphize::field_ty(bcx.ccx().tcx(), substs, + &def.variants[nndiscr as usize].fields[0])); ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty), DebugLoc::None) } - StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { + layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee) - } + }, + _ => bug!("{} is not an enum", t) }; match cast_to { None => val, - Some(llty) => if is_discr_signed(r) { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) } + Some(llty) => if is_discr_signed(&l) { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) } } } -fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, discrfield: &DiscrField, +fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: u64, discrfield: &layout::FieldPath, scrutinee: ValueRef) -> ValueRef { - let llptrptr = GEPi(bcx, scrutinee, &discrfield[..]); + let llptrptr = GEPi(bcx, scrutinee, + &discrfield.iter().map(|f| *f as usize).collect::>()[..]); let llptr = Load(bcx, llptrptr); - let cmp = if nndiscr == Disr(0) { IntEQ } else { IntNE }; + let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)), DebugLoc::None) } /// Helper for cases where the discriminant is simply loaded. -fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr, +fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64, range_assert: bool) -> ValueRef { - let llty = ll_inttype(bcx.ccx(), ity); + let llty = Type::from_integer(bcx.ccx(), ity); assert_eq!(val_ty(ptr), llty.ptr_to()); - let bits = machine::llbitsize_of_real(bcx.ccx(), llty); + let bits = ity.size().bits(); assert!(bits <= 64); let bits = bits as usize; - let mask = Disr(!0u64 >> (64 - bits)); + let mask = !0u64 >> (64 - bits); // For a (max) discr of -1, max will be `-1 as usize`, which overflows. // However, that is fine here (it would still represent the full range), - if max.wrapping_add(Disr(1)) & mask == min & mask || !range_assert { + if max.wrapping_add(1) & mask == min & mask || !range_assert { // i.e., if the range is everything. The lo==hi case would be // rejected by the LLVM verifier (it would mean either an // empty set, which is impossible, or the entire range of the @@ -963,7 +406,7 @@ fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr, } else { // llvm::ConstantRange can deal with ranges that wrap around, // so an overflow on (max + 1) is fine. - LoadRangeAssert(bcx, ptr, min.0, max.0.wrapping_add(1), /* signed: */ True) + LoadRangeAssert(bcx, ptr, min, max.wrapping_add(1), /* signed: */ True) } } @@ -971,60 +414,54 @@ fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr, /// discriminant-like value returned by `trans_switch`. /// /// This should ideally be less tightly tied to `_match`. 
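A worked sketch (illustrative only) of the "is the range everything?" test in `load_discr` above: the mask keeps only the discriminant's bits, and only when `max + 1` wraps around to `min` does the range cover every representable value, in which case the range-asserting load is skipped.

```rust
fn range_is_everything(bits: u32, min: u64, max: u64) -> bool {
    // Mask of the low `bits` bits of a u64 discriminant.
    let mask = !0u64 >> (64 - bits);
    max.wrapping_add(1) & mask == min & mask
}

fn main() {
    // A u8 discriminant spanning 0..=255 covers every value: no range assert.
    assert!(range_is_everything(8, 0, 255));
    // Three variants (0..=2) do not: keep the range assert.
    assert!(!range_is_everything(8, 0, 2));
    // A "max" of -1 stored as u64 wraps cleanly, as the comment above notes.
    assert!(range_is_everything(64, 0, u64::max_value()));
    println!("ok");
}
```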
-pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr) +pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef { - match *r { - CEnum(ity, _, _) => { - C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true) - } - General(ity, _, _) => { - C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true) + let l = bcx.ccx().layout_of(t); + match *l { + layout::CEnum { discr, .. } + | layout::General { discr, .. }=> { + C_integral(Type::from_integer(bcx.ccx(), discr), value.0, true) } - Univariant(..) => { - bug!("no cases for univariants or structs") + layout::RawNullablePointer { .. } | + layout::StructWrappedNullablePointer { .. } => { + assert!(value == Disr(0) || value == Disr(1)); + C_bool(bcx.ccx(), value != Disr(0)) } - RawNullablePointer { .. } | - StructWrappedNullablePointer { .. } => { - assert!(discr == Disr(0) || discr == Disr(1)); - C_bool(bcx.ccx(), discr != Disr(0)) + _ => { + bug!("{} does not have a discriminant. Represented as {:#?}", t, l); } } } /// Set the discriminant for a new value of the given case of the given /// representation. -pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, - val: ValueRef, discr: Disr) { - match *r { - CEnum(ity, min, max) => { - assert_discr_in_range(ity, min, max, discr); - Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true), +pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, + val: ValueRef, to: Disr) { + let l = bcx.ccx().layout_of(t); + match *l { + layout::CEnum{ discr, min, max, .. } => { + assert_discr_in_range(Disr(min), Disr(max), to); + Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true), val); } - General(ity, ref cases, dtor) => { - if dtor_active(dtor) { - let ptr = trans_field_ptr(bcx, r, MaybeSizedValue::sized(val), discr, - cases[discr.0 as usize].fields.len() - 2); - Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED), ptr); - } - Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true), + layout::General{ discr, .. } => { + Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true), StructGEP(bcx, val, 0)); } - Univariant(ref st, dtor) => { - assert_eq!(discr, Disr(0)); - if dtor_active(dtor) { - Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED), - StructGEP(bcx, val, st.fields.len() - 1)); - } + layout::Univariant { .. } + | layout::UntaggedUnion { .. } + | layout::Vector { .. } => { + assert_eq!(to, Disr(0)); } - RawNullablePointer { nndiscr, nnty, ..} => { - if discr != nndiscr { + layout::RawNullablePointer { nndiscr, .. } => { + let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0]; + if to.0 != nndiscr { let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty); Store(bcx, C_null(llptrty), val); } } - StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => { - if discr != nndiscr { + layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. 
} => { + if to.0 != nndiscr { if target_sets_discr_via_memset(bcx) { // Issue #34427: As workaround for LLVM bug on // ARM, use memset of 0 on whole struct rather @@ -1032,16 +469,18 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, let b = B(bcx); let llptr = b.pointercast(val, Type::i8(b.ccx).ptr_to()); let fill_byte = C_u8(b.ccx, 0); - let size = C_uint(b.ccx, nonnull.size); - let align = C_i32(b.ccx, nonnull.align as i32); + let size = C_uint(b.ccx, nonnull.stride().bytes()); + let align = C_i32(b.ccx, nonnull.align.abi() as i32); base::call_memset(&b, llptr, fill_byte, size, align, false); } else { - let llptrptr = GEPi(bcx, val, &discrfield[..]); + let path = discrfield.iter().map(|&i| i as usize).collect::>(); + let llptrptr = GEPi(bcx, val, &path[..]); let llptrty = val_ty(llptrptr).element_type(); Store(bcx, C_null(llptrty), llptrptr); } } } + _ => bug!("Cannot handle {} represented as {:#?}", t, l) } } @@ -1049,69 +488,59 @@ fn target_sets_discr_via_memset<'blk, 'tcx>(bcx: Block<'blk, 'tcx>) -> bool { bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64" } -fn assert_discr_in_range(ity: IntType, min: Disr, max: Disr, discr: Disr) { - match ity { - attr::UnsignedInt(_) => { - assert!(min <= discr); - assert!(discr <= max); - }, - attr::SignedInt(_) => { - assert!(min.0 as i64 <= discr.0 as i64); - assert!(discr.0 as i64 <= max.0 as i64); - }, - } -} - -/// The number of fields in a given case; for use when obtaining this -/// information from the type or definition is less convenient. -pub fn num_args(r: &Repr, discr: Disr) -> usize { - match *r { - CEnum(..) => 0, - Univariant(ref st, dtor) => { - assert_eq!(discr, Disr(0)); - st.fields.len() - (if dtor_active(dtor) { 1 } else { 0 }) - } - General(_, ref cases, dtor) => { - cases[discr.0 as usize].fields.len() - 1 - (if dtor_active(dtor) { 1 } else { 0 }) - } - RawNullablePointer { nndiscr, ref nullfields, .. } => { - if discr == nndiscr { 1 } else { nullfields.len() } - } - StructWrappedNullablePointer { ref nonnull, nndiscr, - ref nullfields, .. } => { - if discr == nndiscr { nonnull.fields.len() } else { nullfields.len() } - } +fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { + if min <= max { + assert!(min <= discr && discr <= max) + } else { + assert!(min <= discr || discr <= max) } } /// Access a field, at a point when the value's case is known. -pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, +pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef { - trans_field_ptr_builder(&bcx.build(), r, val, discr, ix) + trans_field_ptr_builder(&bcx.build(), t, val, discr, ix) } /// Access a field, at a point when the value's case is known. pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - r: &Repr<'tcx>, + t: Ty<'tcx>, val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef { + let l = bcx.ccx().layout_of(t); + debug!("trans_field_ptr_builder on {} represented as {:#?}", t, l); // Note: if this ever needs to generate conditionals (e.g., if we // decide to do some kind of cdr-coding-like non-unique repr // someday), it will need to return a possibly-new bcx as well. - match *r { - CEnum(..) => { - bug!("element access in C-like enum") - } - Univariant(ref st, _dtor) => { + match *l { + layout::Univariant { ref variant, .. 
} => { assert_eq!(discr, Disr(0)); - struct_field_ptr(bcx, st, val, ix, false) - } - General(_, ref cases, _) => { - struct_field_ptr(bcx, &cases[discr.0 as usize], val, ix + 1, true) + struct_field_ptr(bcx, &variant, + &compute_fields(bcx.ccx(), t, 0, false), + val, ix, false) + } + layout::Vector { count, .. } => { + assert_eq!(discr.0, 0); + assert!((ix as u64) < count); + bcx.struct_gep(val.value, ix) + } + layout::General { discr: d, ref variants, .. } => { + let mut fields = compute_fields(bcx.ccx(), t, discr.0 as usize, false); + fields.insert(0, d.to_ty(&bcx.ccx().tcx(), false)); + struct_field_ptr(bcx, &variants[discr.0 as usize], + &fields, + val, ix + 1, true) + } + layout::UntaggedUnion { .. } => { + let fields = compute_fields(bcx.ccx(), t, 0, false); + let ty = type_of::in_memory_type_of(bcx.ccx(), fields[ix]); + if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } + bcx.pointercast(val.value, ty.ptr_to()) } - RawNullablePointer { nndiscr, ref nullfields, .. } | - StructWrappedNullablePointer { nndiscr, ref nullfields, .. } if discr != nndiscr => { + layout::RawNullablePointer { nndiscr, .. } | + layout::StructWrappedNullablePointer { nndiscr, .. } if discr.0 != nndiscr => { + let nullfields = compute_fields(bcx.ccx(), t, (1-nndiscr) as usize, false); // The unit-like case might have a nonzero number of unit-like fields. // (e.d., Result of Either with (), as one side.) let ty = type_of::type_of(bcx.ccx(), nullfields[ix]); @@ -1121,32 +550,36 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } bcx.pointercast(val.value, ty.ptr_to()) } - RawNullablePointer { nndiscr, nnty, .. } => { + layout::RawNullablePointer { nndiscr, .. } => { + let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0]; assert_eq!(ix, 0); - assert_eq!(discr, nndiscr); + assert_eq!(discr.0, nndiscr); let ty = type_of::type_of(bcx.ccx(), nnty); if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } bcx.pointercast(val.value, ty.ptr_to()) } - StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { - assert_eq!(discr, nndiscr); - struct_field_ptr(bcx, nonnull, val, ix, false) + layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { + assert_eq!(discr.0, nndiscr); + struct_field_ptr(bcx, &nonnull, + &compute_fields(bcx.ccx(), t, discr.0 as usize, false), + val, ix, false) } + _ => bug!("element access in type without elements: {} represented as {:#?}", t, l) } } fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - st: &Struct<'tcx>, val: MaybeSizedValue, + st: &layout::Struct, fields: &Vec>, val: MaybeSizedValue, ix: usize, needs_cast: bool) -> ValueRef { let ccx = bcx.ccx(); - let fty = st.fields[ix]; + let fty = fields[ix]; let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty); if bcx.is_unreachable() { return C_undef(ll_fty.ptr_to()); } let ptr_val = if needs_cast { - let fields = st.fields.iter().map(|&ty| { + let fields = fields.iter().map(|&ty| { type_of::in_memory_type_of(ccx, ty) }).collect::>(); let real_ty = Type::struct_(ccx, &fields[..], st.packed); @@ -1198,14 +631,8 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, let meta = val.meta; - // Calculate the unaligned offset of the unsized field. 
- let mut offset = 0; - for &ty in &st.fields[0..ix] { - let llty = type_of::sizing_type_of(ccx, ty); - let type_align = type_of::align_of(ccx, ty); - offset = roundup(offset, type_align); - offset += machine::llsize_of_alloc(ccx, llty); - } + + let offset = st.offset_of_field(ix).bytes(); let unaligned_offset = C_uint(bcx.ccx(), offset); // Get the alignment of the field @@ -1234,108 +661,6 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, bcx.pointercast(byte_ptr, ll_fty.ptr_to()) } -pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - r: &Repr<'tcx>, - value: ValueRef, - mut f: F) - -> Block<'blk, 'tcx> where - F: FnMut(Block<'blk, 'tcx>, &Struct<'tcx>, ValueRef) -> Block<'blk, 'tcx>, -{ - let fcx = bcx.fcx; - match *r { - Univariant(ref st, _) => { - f(bcx, st, value) - } - General(ity, ref cases, _) => { - let ccx = bcx.ccx(); - - // See the comments in trans/base.rs for more information (inside - // iter_structural_ty), but the gist here is that if the enum's - // discriminant is *not* in the range that we're expecting (in which - // case we'll take the fall-through branch on the switch - // instruction) then we can't just optimize this to an Unreachable - // block. - // - // Currently we still have filling drop, so this means that the drop - // glue for enums may be called when the enum has been paved over - // with the "I've been dropped" value. In this case the default - // branch of the switch instruction will actually be taken at - // runtime, so the basic block isn't actually unreachable, so we - // need to make it do something with defined behavior. In this case - // we just return early from the function. - // - // Note that this is also why the `trans_get_discr` below has - // `false` to indicate that loading the discriminant should - // not have a range assert. - let ret_void_cx = fcx.new_temp_block("enum-variant-iter-ret-void"); - RetVoid(ret_void_cx, DebugLoc::None); - - let discr_val = trans_get_discr(bcx, r, value, None, false); - let llswitch = Switch(bcx, discr_val, ret_void_cx.llbb, cases.len()); - let bcx_next = fcx.new_temp_block("enum-variant-iter-next"); - - for (discr, case) in cases.iter().enumerate() { - let mut variant_cx = fcx.new_temp_block( - &format!("enum-variant-iter-{}", &discr.to_string()) - ); - let rhs_val = C_integral(ll_inttype(ccx, ity), discr as u64, true); - AddCase(llswitch, rhs_val, variant_cx.llbb); - - let fields = case.fields.iter().map(|&ty| - type_of::type_of(bcx.ccx(), ty)).collect::>(); - let real_ty = Type::struct_(ccx, &fields[..], case.packed); - let variant_value = PointerCast(variant_cx, value, real_ty.ptr_to()); - - variant_cx = f(variant_cx, case, variant_value); - Br(variant_cx, bcx_next.llbb, DebugLoc::None); - } - - bcx_next - } - _ => bug!() - } -} - -/// Access the struct drop flag, if present. 
-pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - r: &Repr<'tcx>, - val: ValueRef) - -> datum::DatumBlock<'blk, 'tcx, datum::Expr> -{ - let tcx = bcx.tcx(); - let ptr_ty = bcx.tcx().mk_imm_ptr(tcx.dtor_type()); - match *r { - Univariant(ref st, dtor) if dtor_active(dtor) => { - let flag_ptr = StructGEP(bcx, val, st.fields.len() - 1); - datum::immediate_rvalue_bcx(bcx, flag_ptr, ptr_ty).to_expr_datumblock() - } - General(_, _, dtor) if dtor_active(dtor) => { - let fcx = bcx.fcx; - let custom_cleanup_scope = fcx.push_custom_cleanup_scope(); - let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum( - bcx, tcx.dtor_type(), "drop_flag", - InitAlloca::Uninit("drop flag itself has no dtor"), - cleanup::CustomScope(custom_cleanup_scope), |bcx, _| { - debug!("no-op populate call for trans_drop_flag_ptr on dtor_type={:?}", - tcx.dtor_type()); - bcx - } - )); - bcx = fold_variants(bcx, r, val, |variant_cx, st, value| { - let ptr = struct_field_ptr(&variant_cx.build(), st, - MaybeSizedValue::sized(value), - (st.fields.len() - 1), false); - datum::Datum::new(ptr, ptr_ty, datum::Lvalue::new("adt::trans_drop_flag_ptr")) - .store_to(variant_cx, scratch.val) - }); - let expr_datum = scratch.to_expr_datum(); - fcx.pop_custom_cleanup_scope(custom_cleanup_scope); - datum::DatumBlock::new(bcx, expr_datum) - } - _ => bug!("tried to get drop flag of non-droppable type") - } -} - /// Construct a constant value, suitable for initializing a /// GlobalVariable, given a case and constant values for its fields. /// Note that this may have a different LLVM type (and different @@ -1355,77 +680,76 @@ pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, /// Currently the returned value has the same size as the type, but /// this could be changed in the future to avoid allocating unnecessary /// space after values of shorter-than-maximum cases. -pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, discr: Disr, +pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, discr: Disr, vals: &[ValueRef]) -> ValueRef { - match *r { - CEnum(ity, min, max) => { + let l = ccx.layout_of(t); + let dl = &ccx.tcx().data_layout; + match *l { + layout::CEnum { discr: d, min, max, .. } => { assert_eq!(vals.len(), 0); - assert_discr_in_range(ity, min, max, discr); - C_integral(ll_inttype(ccx, ity), discr.0, true) - } - General(ity, ref cases, _) => { - let case = &cases[discr.0 as usize]; - let (max_sz, _) = union_size_and_align(&cases[..]); - let lldiscr = C_integral(ll_inttype(ccx, ity), discr.0 as u64, true); - let mut f = vec![lldiscr]; - f.extend_from_slice(vals); - let mut contents = build_const_struct(ccx, case, &f[..]); - contents.extend_from_slice(&[padding(ccx, max_sz - case.size)]); + assert_discr_in_range(Disr(min), Disr(max), discr); + C_integral(Type::from_integer(ccx, d), discr.0, true) + } + layout::General { discr: d, ref variants, .. } => { + let variant = &variants[discr.0 as usize]; + let lldiscr = C_integral(Type::from_integer(ccx, d), discr.0 as u64, true); + let mut vals_with_discr = vec![lldiscr]; + vals_with_discr.extend_from_slice(vals); + let aligned_size = l.size(dl).bytes(); + let contents = build_const_struct(ccx, &variant.offset_after_field[..], + &vals_with_discr[..], variant.packed, aligned_size); C_struct(ccx, &contents[..], false) } - Univariant(ref st, _dro) => { + layout::UntaggedUnion { ref variants, .. 
}=> { assert_eq!(discr, Disr(0)); - let contents = build_const_struct(ccx, st, vals); - C_struct(ccx, &contents[..], st.packed) + let contents = build_const_union(ccx, variants, vals[0]); + C_struct(ccx, &contents, variants.packed) } - RawNullablePointer { nndiscr, nnty, .. } => { - if discr == nndiscr { + layout::Univariant { ref variant, .. } => { + assert_eq!(discr, Disr(0)); + let aligned_size = l.size(dl).bytes(); + let contents = build_const_struct(ccx, &variant.offset_after_field[..], + vals, variant.packed, aligned_size); + C_struct(ccx, &contents[..], variant.packed) + } + layout::Vector { .. } => { + C_vector(vals) + } + layout::RawNullablePointer { nndiscr, .. } => { + let nnty = compute_fields(ccx, t, nndiscr as usize, false)[0]; + if discr.0 == nndiscr { assert_eq!(vals.len(), 1); vals[0] } else { C_null(type_of::sizing_type_of(ccx, nnty)) } } - StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { - if discr == nndiscr { + layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { + let aligned_size = l.size(dl).bytes(); + if discr.0 == nndiscr { C_struct(ccx, &build_const_struct(ccx, - nonnull, - vals), + &nonnull.offset_after_field[..], + vals, nonnull.packed, aligned_size), false) } else { - let vals = nonnull.fields.iter().map(|&ty| { + let fields = compute_fields(ccx, t, nndiscr as usize, false); + let vals = fields.iter().map(|&ty| { // Always use null even if it's not the `discrfield`th // field; see #8506. C_null(type_of::sizing_type_of(ccx, ty)) }).collect::>(); C_struct(ccx, &build_const_struct(ccx, - nonnull, - &vals[..]), + &nonnull.offset_after_field[..], + &vals[..], + false, + aligned_size), false) } } + _ => bug!("trans_const: cannot handle type {} repreented as {:#?}", t, l) } } -/// Compute struct field offsets relative to struct begin. -fn compute_struct_field_offsets<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - st: &Struct<'tcx>) -> Vec { - let mut offsets = vec!(); - - let mut offset = 0; - for &ty in &st.fields { - let llty = type_of::sizing_type_of(ccx, ty); - if !st.packed { - let type_align = type_of::align_of(ccx, ty); - offset = roundup(offset, type_align); - } - offsets.push(offset); - offset += machine::llsize_of_alloc(ccx, llty); - } - assert_eq!(st.fields.len(), offsets.len()); - offsets -} - /// Building structs is a little complicated, because we might need to /// insert padding if a field's value is less aligned than its type. /// @@ -1435,17 +759,26 @@ fn compute_struct_field_offsets<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, /// a two-element struct will locate it at offset 4, and accesses to it /// will read the wrong memory. 
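The doc comment above spells out why constants may need explicit padding (a u32 stored after a one-byte value must still land at offset 4). A small standalone model of that arithmetic, with (size, align) pairs standing in for LLVM constant values and hypothetical helper names:

    fn roundup(x: u64, a: u64) -> u64 {
        ((x + (a - 1)) / a) * a
    }

    /// For each field, the number of explicit padding bytes emitted before it.
    /// `vals` are (size, align) of the constant values actually supplied;
    /// `target_offsets` are where the struct layout wants each field to start.
    fn padding_before_fields(vals: &[(u64, u64)], target_offsets: &[u64], packed: bool) -> Vec<u64> {
        assert_eq!(vals.len(), target_offsets.len());
        let mut offset = 0;
        let mut pads = Vec::new();
        for (&(size, align), &target) in vals.iter().zip(target_offsets) {
            if !packed {
                // An aligned value may already be pushed to its natural offset.
                offset = roundup(offset, align);
            }
            assert!(target >= offset, "value overshoots its field's target offset");
            pads.push(target - offset);
            offset = target + size;
        }
        pads
    }

    fn main() {
        // (u8, u32): alignment alone puts the u32 at offset 4, no padding needed.
        assert_eq!(padding_before_fields(&[(1, 1), (4, 4)], &[0, 4], false), vec![0, 0]);
        // Same layout but with byte-aligned constant values: 3 explicit padding bytes.
        assert_eq!(padding_before_fields(&[(1, 1), (4, 4)], &[0, 4], true), vec![0, 3]);
    }
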
fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - st: &Struct<'tcx>, vals: &[ValueRef]) + offset_after_field: &[layout::Size], + vals: &[ValueRef], + packed: bool, + aligned_size: u64) -> Vec { - assert_eq!(vals.len(), st.fields.len()); + assert_eq!(vals.len(), offset_after_field.len()); - let target_offsets = compute_struct_field_offsets(ccx, st); + if vals.len() == 0 { + return Vec::new(); + } // offset of current value let mut offset = 0; let mut cfields = Vec::new(); + let target_offsets = offset_after_field.iter().map(|i| i.bytes()); for (&val, target_offset) in vals.iter().zip(target_offsets) { - if !st.packed { + assert!(!is_undef(val)); + cfields.push(val); + offset += machine::llsize_of_alloc(ccx, val_ty(val)); + if !packed { let val_align = machine::llalign_of_min(ccx, val_ty(val)); offset = roundup(offset, val_align); } @@ -1453,14 +786,25 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, cfields.push(padding(ccx, target_offset - offset)); offset = target_offset; } - assert!(!is_undef(val)); - cfields.push(val); - offset += machine::llsize_of_alloc(ccx, val_ty(val)); } - assert!(st.sized && offset <= st.size); - if offset != st.size { - cfields.push(padding(ccx, st.size - offset)); + if offset < aligned_size { + cfields.push(padding(ccx, aligned_size - offset)); + } + + cfields +} + +fn build_const_union<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + un: &layout::Union, + field_val: ValueRef) + -> Vec { + let mut cfields = vec![field_val]; + + let offset = machine::llsize_of_alloc(ccx, val_ty(field_val)); + let size = un.stride().bytes(); + if offset != size { + cfields.push(padding(ccx, size - offset)); } cfields @@ -1474,44 +818,26 @@ fn padding(ccx: &CrateContext, size: u64) -> ValueRef { #[inline] fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } -/// Get the discriminant of a constant value. -pub fn const_get_discrim(r: &Repr, val: ValueRef) -> Disr { - match *r { - CEnum(ity, _, _) => { - match ity { - attr::SignedInt(..) => Disr(const_to_int(val) as u64), - attr::UnsignedInt(..) => Disr(const_to_uint(val)), - } - } - General(ity, _, _) => { - match ity { - attr::SignedInt(..) => Disr(const_to_int(const_get_elt(val, &[0])) as u64), - attr::UnsignedInt(..) => Disr(const_to_uint(const_get_elt(val, &[0]))) - } - } - Univariant(..) => Disr(0), - RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => { - bug!("const discrim access of non c-like enum") - } - } -} - /// Extract a field of a constant value, as appropriate for its /// representation. /// /// (Not to be confused with `common::const_get_elt`, which operates on /// raw LLVM-level structs and arrays.) -pub fn const_get_field(r: &Repr, val: ValueRef, _discr: Disr, +pub fn const_get_field<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, + val: ValueRef, _discr: Disr, ix: usize) -> ValueRef { - match *r { - CEnum(..) => bug!("element access in C-like enum const"), - Univariant(..) => const_struct_field(val, ix), - General(..) => const_struct_field(val, ix + 1), - RawNullablePointer { .. } => { + let l = ccx.layout_of(t); + match *l { + layout::CEnum { .. } => bug!("element access in C-like enum const"), + layout::Univariant { .. } | layout::Vector { .. } => const_struct_field(val, ix), + layout::UntaggedUnion { .. } => const_struct_field(val, 0), + layout::General { .. } => const_struct_field(val, ix + 1), + layout::RawNullablePointer { .. } => { assert_eq!(ix, 0); val }, - StructWrappedNullablePointer{ .. 
} => const_struct_field(val, ix) + layout::StructWrappedNullablePointer{ .. } => const_struct_field(val, ix), + _ => bug!("{} does not have fields.", t) } } diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs index 5514fb0f4e..308118b1fb 100644 --- a/src/librustc_trans/asm.rs +++ b/src/librustc_trans/asm.rs @@ -14,28 +14,29 @@ use llvm::{self, ValueRef}; use base; use build::*; use common::*; -use datum::{Datum, Lvalue}; use type_of; use type_::Type; -use rustc::hir as ast; +use rustc::hir; +use rustc::ty::Ty; + use std::ffi::CString; use syntax::ast::AsmDialect; use libc::{c_uint, c_char}; // Take an inline assembly expression and splat it out via LLVM pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ia: &ast::InlineAsm, - outputs: Vec>, + ia: &hir::InlineAsm, + outputs: Vec<(ValueRef, Ty<'tcx>)>, mut inputs: Vec) { let mut ext_constraints = vec![]; let mut output_types = vec![]; // Prepare the output operands let mut indirect_outputs = vec![]; - for (i, (out, out_datum)) in ia.outputs.iter().zip(&outputs).enumerate() { + for (i, (out, &(val, ty))) in ia.outputs.iter().zip(&outputs).enumerate() { let val = if out.is_rw || out.is_indirect { - Some(base::load_ty(bcx, out_datum.val, out_datum.ty)) + Some(base::load_ty(bcx, val, ty)) } else { None }; @@ -46,7 +47,7 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, if out.is_indirect { indirect_outputs.push(val.unwrap()); } else { - output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty)); + output_types.push(type_of::type_of(bcx.ccx(), ty)); } } if !indirect_outputs.is_empty() { @@ -100,9 +101,9 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Again, based on how many outputs we have let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); - for (i, (_, datum)) in outputs.enumerate() { + for (i, (_, &(val, _))) in outputs.enumerate() { let v = if num_outputs == 1 { r } else { ExtractValue(bcx, r, i) }; - Store(bcx, v, datum.val); + Store(bcx, v, val); } // Store expn_id in a metadata node so we can map LLVM errors diff --git a/src/librustc_trans/assert_module_sources.rs b/src/librustc_trans/assert_module_sources.rs index e0532e7476..7fe6d2bbfe 100644 --- a/src/librustc_trans/assert_module_sources.rs +++ b/src/librustc_trans/assert_module_sources.rs @@ -29,7 +29,6 @@ use rustc::ty::TyCtxt; use syntax::ast; -use syntax::attr::AttrMetaMethods; use syntax::parse::token::InternedString; use {ModuleSource, ModuleTranslation}; diff --git a/src/librustc_trans/back/link.rs b/src/librustc_trans/back/link.rs index f17d1a7f1c..5dab82dbc7 100644 --- a/src/librustc_trans/back/link.rs +++ b/src/librustc_trans/back/link.rs @@ -26,10 +26,11 @@ use CrateTranslation; use util::common::time; use util::fs::fix_windows_verbatim_for_gcc; use rustc::dep_graph::DepNode; -use rustc::ty::TyCtxt; +use rustc::hir::def_id::CrateNum; +use rustc::hir::svh::Svh; use rustc_back::tempdir::TempDir; +use rustc_incremental::IncrementalHashesMap; -use rustc_incremental::SvhCalculate; use std::ascii; use std::char; use std::env; @@ -42,7 +43,6 @@ use std::process::Command; use std::str; use flate; use syntax::ast; -use syntax::attr::AttrMetaMethods; use syntax_pos::Span; // RLIB LLVM-BYTECODE OBJECT LAYOUT @@ -125,12 +125,12 @@ pub fn find_crate_name(sess: Option<&Session>, } -pub fn build_link_meta<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - name: &str) - -> LinkMeta { +pub fn build_link_meta(incremental_hashes_map: &IncrementalHashesMap, + name: &str) + -> LinkMeta { let r = LinkMeta { 
crate_name: name.to_owned(), - crate_hash: tcx.calculate_krate_hash(), + crate_hash: Svh::new(incremental_hashes_map[&DepNode::Krate]), }; info!("{:?}", r); return r; @@ -239,6 +239,7 @@ pub fn invalid_output_for_target(sess: &Session, match (sess.target.target.options.dynamic_linking, sess.target.target.options.executables, crate_type) { (false, _, config::CrateTypeCdylib) | + (false, _, config::CrateTypeRustcMacro) | (false, _, config::CrateTypeDylib) => true, (_, false, config::CrateTypeExecutable) => true, _ => false @@ -262,6 +263,7 @@ pub fn filename_for_input(sess: &Session, outputs.out_directory.join(&format!("lib{}.rlib", libname)) } config::CrateTypeCdylib | + config::CrateTypeRustcMacro | config::CrateTypeDylib => { let (prefix, suffix) = (&sess.target.target.options.dll_prefix, &sess.target.target.options.dll_suffix); @@ -287,17 +289,18 @@ pub fn filename_for_input(sess: &Session, } pub fn each_linked_rlib(sess: &Session, - f: &mut FnMut(ast::CrateNum, &Path)) { + f: &mut FnMut(CrateNum, &Path)) { let crates = sess.cstore.used_crates(LinkagePreference::RequireStatic).into_iter(); let fmts = sess.dependency_formats.borrow(); let fmts = fmts.get(&config::CrateTypeExecutable) .or_else(|| fmts.get(&config::CrateTypeStaticlib)) - .or_else(|| fmts.get(&config::CrateTypeCdylib)); + .or_else(|| fmts.get(&config::CrateTypeCdylib)) + .or_else(|| fmts.get(&config::CrateTypeRustcMacro)); let fmts = fmts.unwrap_or_else(|| { bug!("could not find formats for rlibs") }); for (cnum, path) in crates { - match fmts[cnum as usize - 1] { + match fmts[cnum.as_usize() - 1] { Linkage::NotLinked | Linkage::IncludedFromDylib => continue, _ => {} } @@ -571,10 +574,6 @@ fn write_rlib_bytecode_object_v1(writer: &mut Write, fn link_staticlib(sess: &Session, objects: &[PathBuf], out_filename: &Path, tempdir: &Path) { let mut ab = link_rlib(sess, None, objects, out_filename, tempdir); - if !sess.target.target.options.no_compiler_rt { - ab.add_native_library("compiler-rt"); - } - let mut all_native_libs = vec![]; each_linked_rlib(sess, &mut |cnum, path| { @@ -638,9 +637,6 @@ fn link_natively(sess: &Session, let mut linker = trans.linker_info.to_linker(&mut cmd, &sess); link_args(&mut *linker, sess, crate_type, tmpdir, objects, out_filename, outputs); - if !sess.target.target.options.no_compiler_rt { - linker.link_staticlib("compiler-rt"); - } } cmd.args(&sess.target.target.options.late_link_args); for obj in &sess.target.target.options.post_link_objects { @@ -739,7 +735,8 @@ fn link_args(cmd: &mut Linker, // When linking a dynamic library, we put the metadata into a section of the // executable. This metadata is in a separate object file from the main // object file, so we link that in here. 
- if crate_type == config::CrateTypeDylib { + if crate_type == config::CrateTypeDylib || + crate_type == config::CrateTypeRustcMacro { cmd.add_object(&outputs.with_extension("metadata.o")); } @@ -757,7 +754,8 @@ fn link_args(cmd: &mut Linker, let empty_vec = Vec::new(); let empty_str = String::new(); let args = sess.opts.cg.link_args.as_ref().unwrap_or(&empty_vec); - let mut args = args.iter().chain(used_link_args.iter()); + let more_args = &sess.opts.cg.link_arg; + let mut args = args.iter().chain(more_args.iter()).chain(used_link_args.iter()); let relocation_model = sess.opts.cg.relocation_model.as_ref() .unwrap_or(&empty_str); if (t.options.relocation_model == "pic" || *relocation_model == "pic") @@ -847,6 +845,7 @@ fn link_args(cmd: &mut Linker, if let Some(ref args) = sess.opts.cg.link_args { cmd.args(args); } + cmd.args(&sess.opts.cg.link_arg); cmd.args(&used_link_args); } @@ -930,17 +929,24 @@ fn add_upstream_rust_crates(cmd: &mut Linker, // crates. let deps = sess.cstore.used_crates(LinkagePreference::RequireDynamic); + let mut compiler_builtins = None; + for &(cnum, _) in &deps { // We may not pass all crates through to the linker. Some crates may // appear statically in an existing dylib, meaning we'll pick up all the // symbols from the dylib. let src = sess.cstore.used_crate_source(cnum); - match data[cnum as usize - 1] { + match data[cnum.as_usize() - 1] { + // compiler-builtins are always placed last to ensure that they're + // linked correctly. + _ if sess.cstore.is_compiler_builtins(cnum) => { + assert!(compiler_builtins.is_none()); + compiler_builtins = Some(cnum); + } Linkage::NotLinked | Linkage::IncludedFromDylib => {} Linkage::Static => { - add_static_crate(cmd, sess, tmpdir, crate_type, - &src.rlib.unwrap().0) + add_static_crate(cmd, sess, tmpdir, crate_type, cnum); } Linkage::Dynamic => { add_dynamic_crate(cmd, sess, &src.dylib.unwrap().0) @@ -948,6 +954,13 @@ fn add_upstream_rust_crates(cmd: &mut Linker, } } + // We must always link the `compiler_builtins` crate statically. Even if it + // was already "included" in a dylib (e.g. `libstd` when `-C prefer-dynamic` + // is used) + if let Some(cnum) = compiler_builtins { + add_static_crate(cmd, sess, tmpdir, crate_type, cnum); + } + // Converts a library file-stem into a cc -l argument fn unlib<'a>(config: &config::Config, stem: &'a str) -> &'a str { if stem.starts_with("lib") && !config.target.options.is_like_windows { @@ -964,12 +977,16 @@ fn add_upstream_rust_crates(cmd: &mut Linker, // * For LTO, we remove upstream object files. // * For dylibs we remove metadata and bytecode from upstream rlibs // - // When performing LTO, all of the bytecode from the upstream libraries has - // already been included in our object file output. As a result we need to - // remove the object files in the upstream libraries so the linker doesn't - // try to include them twice (or whine about duplicate symbols). We must - // continue to include the rest of the rlib, however, as it may contain - // static native libraries which must be linked in. + // When performing LTO, almost(*) all of the bytecode from the upstream + // libraries has already been included in our object file output. As a + // result we need to remove the object files in the upstream libraries so + // the linker doesn't try to include them twice (or whine about duplicate + // symbols). We must continue to include the rest of the rlib, however, as + // it may contain static native libraries which must be linked in. 
+ // + // (*) Crates marked with `#![no_builtins]` don't participate in LTO and + // their bytecode wasn't included. The object files in those libraries must + // still be passed to the linker. // // When making a dynamic library, linkers by default don't include any // object files in an archive if they're not necessary to resolve the link. @@ -989,7 +1006,9 @@ fn add_upstream_rust_crates(cmd: &mut Linker, sess: &Session, tmpdir: &Path, crate_type: config::CrateType, - cratepath: &Path) { + cnum: CrateNum) { + let src = sess.cstore.used_crate_source(cnum); + let cratepath = &src.rlib.unwrap().0; if !sess.lto() && crate_type != config::CrateTypeDylib { cmd.link_rlib(&fix_windows_verbatim_for_gcc(cratepath)); return @@ -1013,7 +1032,14 @@ fn add_upstream_rust_crates(cmd: &mut Linker, } let canonical = f.replace("-", "_"); let canonical_name = name.replace("-", "_"); - if sess.lto() && canonical.starts_with(&canonical_name) && + + // If we're performing LTO and this is a rust-generated object + // file, then we don't need the object file as it's part of the + // LTO module. Note that `#![no_builtins]` is excluded from LTO, + // though, so we let that object file slide. + if sess.lto() && + !sess.cstore.is_no_builtins(cnum) && + canonical.starts_with(&canonical_name) && canonical.ends_with(".o") { let num = &f[name.len()..f.len() - 2]; if num.len() > 0 && num[1..].parse::().is_ok() { @@ -1024,13 +1050,23 @@ fn add_upstream_rust_crates(cmd: &mut Linker, any_objects = true; } - if any_objects { - archive.build(); - if crate_type == config::CrateTypeDylib { - cmd.link_whole_rlib(&fix_windows_verbatim_for_gcc(&dst)); - } else { - cmd.link_rlib(&fix_windows_verbatim_for_gcc(&dst)); - } + if !any_objects { + return + } + archive.build(); + + // If we're creating a dylib, then we need to include the + // whole of each object in our archive into that artifact. This is + // because a `dylib` can be reused as an intermediate artifact. + // + // Note, though, that we don't want to include the whole of a + // compiler-builtins crate (e.g. compiler-rt) because it'll get + // repeatedly linked anyway. + if crate_type == config::CrateTypeDylib && + !sess.cstore.is_compiler_builtins(cnum) { + cmd.link_whole_rlib(&fix_windows_verbatim_for_gcc(&dst)); + } else { + cmd.link_rlib(&fix_windows_verbatim_for_gcc(&dst)); } }); } diff --git a/src/librustc_trans/back/linker.rs b/src/librustc_trans/back/linker.rs index cb990ead8e..dd14f98c92 100644 --- a/src/librustc_trans/back/linker.rs +++ b/src/librustc_trans/back/linker.rs @@ -8,10 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::collections::HashMap; use std::ffi::OsString; use std::fs::{self, File}; -use std::io::{self, BufWriter}; use std::io::prelude::*; +use std::io::{self, BufWriter}; use std::path::{Path, PathBuf}; use std::process::Command; @@ -20,24 +21,24 @@ use monomorphize::Instance; use back::archive; use middle::dependency_format::Linkage; +use rustc::hir::def_id::CrateNum; use session::Session; use session::config::CrateType; use session::config; -use syntax::ast; /// For all the linkers we support, and information they might /// need out of the shared crate context before we get rid of it. 
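The comments above describe which members are pruned from an upstream rlib once LTO has already swallowed their bytecode, and that `#![no_builtins]` crates are exempt. A standalone sketch of the member-name test used to recognize rust-generated object files; the names are hypothetical, and the real code additionally consults `is_no_builtins` for the owning crate:

    fn is_rust_object_member(member: &str, crate_name: &str) -> bool {
        let canonical = member.replace("-", "_");
        let canonical_name = crate_name.replace("-", "_");
        if !(canonical.starts_with(&canonical_name) && canonical.ends_with(".o")) {
            return false;
        }
        // Guard added for this sketch so the slice below stays in bounds.
        if member.len() < crate_name.len() + 2 {
            return false;
        }
        // Members look like "<crate>.<n>.o": the codegen-unit index between
        // the crate name and the ".o" suffix must parse as a number.
        let num = &member[crate_name.len()..member.len() - 2];
        num.len() > 0 && num[1..].parse::<u32>().is_ok()
    }

    fn main() {
        assert!(is_rust_object_member("std.0.o", "std"));
        assert!(is_rust_object_member("rustc-serialize.3.o", "rustc-serialize"));
        assert!(!is_rust_object_member("memcpy.o", "std"));
    }
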
pub struct LinkerInfo { - dylib_exports: Vec<String>, - cdylib_exports: Vec<String> + exports: HashMap<CrateType, Vec<String>>, } impl<'a, 'tcx> LinkerInfo { pub fn new(scx: &SharedCrateContext<'a, 'tcx>, reachable: &[String]) -> LinkerInfo { LinkerInfo { - dylib_exports: exported_symbols(scx, reachable, CrateType::CrateTypeDylib), - cdylib_exports: exported_symbols(scx, reachable, CrateType::CrateTypeCdylib) + exports: scx.sess().crate_types.borrow().iter().map(|&c| { + (c, exported_symbols(scx, reachable, c)) + }).collect(), } } @@ -243,33 +244,54 @@ impl<'a> Linker for GnuLinker<'a> { // exported symbols to ensure we don't expose any more. The object files // have far more public symbols than we actually want to export, so we // hide them all here. - if crate_type == CrateType::CrateTypeDylib { + if crate_type == CrateType::CrateTypeDylib || + crate_type == CrateType::CrateTypeRustcMacro { return } + let mut arg = OsString::new(); let path = tmpdir.join("list"); - let prefix = if self.sess.target.target.options.is_like_osx { - "_" - } else { - "" - }; - let res = (|| -> io::Result<()> { - let mut f = BufWriter::new(File::create(&path)?); - for sym in &self.info.cdylib_exports { - writeln!(f, "{}{}", prefix, sym)?; + + if self.sess.target.target.options.is_like_solaris { + let res = (|| -> io::Result<()> { + let mut f = BufWriter::new(File::create(&path)?); + writeln!(f, "{{\n global:")?; + for sym in self.info.exports[&crate_type].iter() { + writeln!(f, " {};", sym)?; + } + writeln!(f, "\n local:\n *;\n}};")?; + Ok(()) + })(); + if let Err(e) = res { + self.sess.fatal(&format!("failed to write version script: {}", e)); } - Ok(()) - })(); - if let Err(e) = res { - self.sess.fatal(&format!("failed to write lib.def file: {}", e)); - } - let mut arg = OsString::new(); - if self.sess.target.target.options.is_like_osx { - arg.push("-Wl,-exported_symbols_list,"); + + arg.push("-Wl,-M,"); + arg.push(&path); } else { - arg.push("-Wl,--retain-symbols-file="); + let prefix = if self.sess.target.target.options.is_like_osx { + "_" + } else { + "" + }; + let res = (|| -> io::Result<()> { + let mut f = BufWriter::new(File::create(&path)?); + for sym in self.info.exports[&crate_type].iter() { + writeln!(f, "{}{}", prefix, sym)?; + } + Ok(()) + })(); + if let Err(e) = res { + self.sess.fatal(&format!("failed to write lib.def file: {}", e)); + } + if self.sess.target.target.options.is_like_osx { + arg.push("-Wl,-exported_symbols_list,"); + } else { + arg.push("-Wl,--retain-symbols-file="); + } + arg.push(&path); } - arg.push(&path); + self.cmd.arg(arg); } } @@ -407,12 +429,7 @@ impl<'a> Linker for MsvcLinker<'a> { // straight to exports. writeln!(f, "LIBRARY")?; writeln!(f, "EXPORTS")?; - let symbols = if crate_type == CrateType::CrateTypeCdylib { - &self.info.cdylib_exports - } else { - &self.info.dylib_exports - }; - for symbol in symbols { + for symbol in self.info.exports[&crate_type].iter() { writeln!(f, " {}", symbol)?; } Ok(()) @@ -430,13 +447,10 @@ fn exported_symbols(scx: &SharedCrateContext, reachable: &[String], crate_type: CrateType) -> Vec<String> { - if !scx.sess().crate_types.borrow().contains(&crate_type) { - return vec![]; - } - // See explanation in GnuLinker::export_symbols, for // why we don't ever need dylib symbols on non-MSVC.
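export_symbols above now emits one of two artifacts: a linker version script on Solaris-style linkers (passed via -Wl,-M), or a flat symbol list for --retain-symbols-file / -exported_symbols_list, with a leading underscore on OSX. A minimal sketch of the two formats, writing to Strings rather than a temp file; the function names are illustrative:

    use std::fmt::Write;

    fn version_script(exports: &[&str]) -> String {
        let mut s = String::from("{\n  global:\n");
        for sym in exports {
            writeln!(s, "    {};", sym).unwrap();
        }
        s.push_str("\n  local:\n    *;\n};\n");
        s
    }

    fn symbol_list(exports: &[&str], is_like_osx: bool) -> String {
        // OSX export lists prepend the platform's leading underscore.
        let prefix = if is_like_osx { "_" } else { "" };
        let mut s = String::new();
        for sym in exports {
            writeln!(s, "{}{}", prefix, sym).unwrap();
        }
        s
    }

    fn main() {
        let exports = ["rust_metadata_mycrate", "my_exported_fn"];
        println!("{}", version_script(&exports));
        print!("{}", symbol_list(&exports, true));
    }
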
- if crate_type == CrateType::CrateTypeDylib { + if crate_type == CrateType::CrateTypeDylib || + crate_type == CrateType::CrateTypeRustcMacro { if !scx.sess().target.target.options.is_like_msvc { return vec![]; } @@ -459,7 +473,7 @@ fn exported_symbols(scx: &SharedCrateContext, let deps = formats[&crate_type].iter(); symbols.extend(deps.enumerate().filter_map(|(i, f)| { if *f == Linkage::Static { - Some((i + 1) as ast::CrateNum) + Some(CrateNum::new(i + 1)) } else { None } diff --git a/src/librustc_trans/back/lto.rs b/src/librustc_trans/back/lto.rs index 69e4a50804..522864c6ec 100644 --- a/src/librustc_trans/back/lto.rs +++ b/src/librustc_trans/back/lto.rs @@ -52,7 +52,12 @@ pub fn run(sess: &session::Session, llmod: ModuleRef, // For each of our upstream dependencies, find the corresponding rlib and // load the bitcode from the archive. Then merge it into the current LLVM // module that we've got. - link::each_linked_rlib(sess, &mut |_, path| { + link::each_linked_rlib(sess, &mut |cnum, path| { + // `#![no_builtins]` crates don't participate in LTO. + if sess.cstore.is_no_builtins(cnum) { + return; + } + let archive = ArchiveRO::open(&path).expect("wanted an rlib"); let bytecodes = archive.iter().filter_map(|child| { child.ok().and_then(|c| c.name().map(|name| (name, c))) diff --git a/src/librustc_back/rpath.rs b/src/librustc_trans/back/rpath.rs similarity index 98% rename from src/librustc_back/rpath.rs rename to src/librustc_trans/back/rpath.rs index 6cba27fcf3..4ed860bd40 100644 --- a/src/librustc_back/rpath.rs +++ b/src/librustc_trans/back/rpath.rs @@ -12,10 +12,11 @@ use std::collections::HashSet; use std::env; use std::path::{Path, PathBuf}; use std::fs; -use syntax::ast; + +use rustc::hir::def_id::CrateNum; pub struct RPathConfig<'a> { - pub used_crates: Vec<(ast::CrateNum, Option)>, + pub used_crates: Vec<(CrateNum, Option)>, pub out_filename: PathBuf, pub is_like_osx: bool, pub has_rpath: bool, diff --git a/src/librustc_trans/back/symbol_names.rs b/src/librustc_trans/back/symbol_names.rs index 5e2c0805c2..f0661e03bc 100644 --- a/src/librustc_trans/back/symbol_names.rs +++ b/src/librustc_trans/back/symbol_names.rs @@ -97,24 +97,36 @@ //! virtually impossible. Thus, symbol hash generation exclusively relies on //! DefPaths which are much more robust in the face of changes to the code base. 
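The module docs above explain that symbol hashes are derived from DefPaths so they stay stable across unrelated code changes; the code that follows feeds those paths and the item type into a Sha256 digest through std's Hasher trait, truncates the result to 64 bits, and finally mangles the path components C++-style ("_ZN" ... "E"). A toy end-to-end model of that pipeline, with an FNV accumulator standing in for Sha256 and illustrative names throughout:

    use std::hash::{Hash, Hasher};

    struct Fnv64(u64);

    impl Fnv64 {
        fn new() -> Self { Fnv64(0xcbf29ce484222325) }
        fn input(&mut self, bytes: &[u8]) {
            for &b in bytes {
                self.0 ^= b as u64;
                self.0 = self.0.wrapping_mul(0x100000001b3);
            }
        }
    }

    // Adapter in the spirit of Sha256Hasher: write() feeds the digest and
    // finish() is never called, because the hash is read from the digest itself.
    struct DigestHasher<'a>(&'a mut Fnv64);

    impl<'a> Hasher for DigestHasher<'a> {
        fn write(&mut self, msg: &[u8]) { self.0.input(msg) }
        fn finish(&self) -> u64 { unreachable!("finish should not be called") }
    }

    fn sanitize(s: &str) -> String {
        s.chars().map(|c| if c.is_ascii_alphanumeric() { c } else { '_' }).collect()
    }

    // "_ZN" + <len><component> for each sanitized path component + the hash + "E".
    fn mangle<'a, I: Iterator<Item = &'a str>>(path: I, hash: &str) -> String {
        let mut n = String::from("_ZN");
        for component in path {
            let sane = sanitize(component);
            n.push_str(&format!("{}{}", sane.len(), sane));
        }
        n.push_str(&format!("{}{}", hash.len(), hash));
        n.push('E');
        n
    }

    fn main() {
        let mut digest = Fnv64::new();
        {
            let mut hasher = DigestHasher(&mut digest);
            // Stand-in for hashing the def-path and the monomorphized item type.
            ("mycrate", "module", "func", "fn()").hash(&mut hasher);
        }
        // 64 bits of the digest, rendered as the "h<hex>" name component.
        let hash = format!("h{:016x}", digest.0);
        println!("{}", mangle(["mycrate", "module", "func"].iter().copied(), &hash));
    }
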
-use common::{CrateContext, SharedCrateContext, gensym_name}; +use common::SharedCrateContext; use monomorphize::Instance; use util::sha2::{Digest, Sha256}; -use rustc::middle::{cstore, weak_lang_items}; -use rustc::hir::def_id::DefId; +use rustc::middle::weak_lang_items; +use rustc::hir::def_id::LOCAL_CRATE; use rustc::hir::map as hir_map; -use rustc::ty::{self, TyCtxt, TypeFoldable}; +use rustc::ty::{self, Ty, TypeFoldable}; +use rustc::ty::fold::TypeVisitor; use rustc::ty::item_path::{self, ItemPathBuffer, RootMode}; +use rustc::ty::subst::Substs; use rustc::hir::map::definitions::{DefPath, DefPathData}; +use rustc::util::common::record_time; use syntax::attr; use syntax::parse::token::{self, InternedString}; use serialize::hex::ToHex; -pub fn def_id_to_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> String { - let def_path = tcx.def_path(def_id); - def_path.to_string(tcx) +use std::hash::Hasher; + +struct Sha256Hasher<'a>(&'a mut Sha256); + +impl<'a> Hasher for Sha256Hasher<'a> { + fn write(&mut self, msg: &[u8]) { + self.0.input(msg) + } + + fn finish(&self) -> u64 { + bug!("Sha256Hasher::finish should not be called"); + } } fn get_symbol_hash<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, @@ -126,53 +138,50 @@ fn get_symbol_hash<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, // parameters substituted; this is // included in the hash as a kind of // safeguard. - item_type: ty::Ty<'tcx>, + item_type: Ty<'tcx>, // values for generic type parameters, // if any. - parameters: &[ty::Ty<'tcx>]) + substs: Option<&'tcx Substs<'tcx>>) -> String { debug!("get_symbol_hash(def_path={:?}, parameters={:?})", - def_path, parameters); + def_path, substs); let tcx = scx.tcx(); let mut hash_state = scx.symbol_hasher().borrow_mut(); + record_time(&tcx.sess.perf_stats.symbol_hash_time, || { + hash_state.reset(); + let hasher = Sha256Hasher(&mut hash_state); + let mut hasher = ty::util::TypeIdHasher::new(tcx, hasher); + + // the main symbol name is not necessarily unique; hash in the + // compiler's internal def-path, guaranteeing each symbol has a + // truly unique path + hasher.def_path(def_path); + + // Include the main item-type. Note that, in this case, the + // assertions about `needs_subst` may not hold, but this item-type + // ought to be the same for every reference anyway. + assert!(!item_type.has_erasable_regions()); + hasher.visit_ty(item_type); + + // also include any type parameters (for generic items) + if let Some(substs) = substs { + assert!(!substs.has_erasable_regions()); + assert!(!substs.needs_subst()); + substs.visit_with(&mut hasher); + } + }); - hash_state.reset(); - - // the main symbol name is not necessarily unique; hash in the - // compiler's internal def-path, guaranteeing each symbol has a - // truly unique path - hash_state.input_str(&def_path.to_string(tcx)); - - // Include the main item-type. Note that, in this case, the - // assertions about `needs_subst` may not hold, but this item-type - // ought to be the same for every reference anyway. 
- assert!(!item_type.has_erasable_regions()); - let encoded_item_type = tcx.sess.cstore.encode_type(tcx, item_type, def_id_to_string); - hash_state.input(&encoded_item_type[..]); - - // also include any type parameters (for generic items) - for t in parameters { - assert!(!t.has_erasable_regions()); - assert!(!t.needs_subst()); - let encoded_type = tcx.sess.cstore.encode_type(tcx, t, def_id_to_string); - hash_state.input(&encoded_type[..]); - } - - return format!("h{}", truncated_hash_result(&mut *hash_state)); - - fn truncated_hash_result(symbol_hasher: &mut Sha256) -> String { - let output = symbol_hasher.result_bytes(); - // 64 bits should be enough to avoid collisions. - output[.. 8].to_hex() - } + // 64 bits should be enough to avoid collisions. + let output = hash_state.result_bytes(); + format!("h{}", output[..8].to_hex()) } impl<'a, 'tcx> Instance<'tcx> { pub fn symbol_name(self, scx: &SharedCrateContext<'a, 'tcx>) -> String { - let Instance { def: def_id, ref substs } = self; + let Instance { def: def_id, substs } = self; debug!("symbol_name(def_id={:?}, substs={:?})", def_id, substs); @@ -185,6 +194,11 @@ impl<'a, 'tcx> Instance<'tcx> { let idx = def_id.index; return scx.sess().generate_plugin_registrar_symbol(svh, idx); } + if scx.sess().derive_registrar_fn.get() == Some(id) { + let svh = &scx.link_meta().crate_hash; + let idx = def_id.index; + return scx.sess().generate_derive_registrar_symbol(svh, idx); + } } // FIXME(eddyb) Precompute a custom symbol name based on attributes. @@ -252,7 +266,7 @@ impl<'a, 'tcx> Instance<'tcx> { // and should not matter anyhow. let instance_ty = scx.tcx().erase_regions(&instance_ty.ty); - let hash = get_symbol_hash(scx, &def_path, instance_ty, substs.types.as_slice()); + let hash = get_symbol_hash(scx, &def_path, instance_ty, Some(substs)); let mut buffer = SymbolPathBuffer { names: Vec::with_capacity(def_path.data.len()) @@ -262,7 +276,7 @@ impl<'a, 'tcx> Instance<'tcx> { scx.tcx().push_item_path(&mut buffer, def_id); }); - mangle(buffer.names.into_iter(), Some(&hash[..])) + mangle(buffer.names.into_iter(), &hash) } } @@ -282,32 +296,16 @@ impl ItemPathBuffer for SymbolPathBuffer { } pub fn exported_name_from_type_and_prefix<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, - t: ty::Ty<'tcx>, + t: Ty<'tcx>, prefix: &str) -> String { let empty_def_path = DefPath { data: vec![], - krate: cstore::LOCAL_CRATE, + krate: LOCAL_CRATE, }; - let hash = get_symbol_hash(scx, &empty_def_path, t, &[]); + let hash = get_symbol_hash(scx, &empty_def_path, t, None); let path = [token::intern_and_get_ident(prefix)]; - mangle(path.iter().cloned(), Some(&hash[..])) -} - -/// Only symbols that are invisible outside their compilation unit should use a -/// name generated by this function. -pub fn internal_name_from_type_and_suffix<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - t: ty::Ty<'tcx>, - suffix: &str) - -> String { - let path = [token::intern(&t.to_string()).as_str(), - gensym_name(suffix).as_str()]; - let def_path = DefPath { - data: vec![], - krate: cstore::LOCAL_CRATE, - }; - let hash = get_symbol_hash(ccx.shared(), &def_path, t, &[]); - mangle(path.iter().cloned(), Some(&hash[..])) + mangle(path.iter().cloned(), &hash) } // Name sanitation. 
LLVM will happily accept identifiers with weird names, but @@ -360,7 +358,7 @@ pub fn sanitize(s: &str) -> String { return result; } -pub fn mangle>(path: PI, hash: Option<&str>) -> String { +fn mangle>(path: PI, hash: &str) -> String { // Follow C++ namespace-mangling style, see // http://en.wikipedia.org/wiki/Name_mangling for more info. // @@ -387,9 +385,7 @@ pub fn mangle>(path: PI, hash: Option<&str>) - push(&mut n, &data); } - if let Some(s) = hash { - push(&mut n, s) - } + push(&mut n, hash); n.push('E'); // End name-sequence. n diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 4b9c29d3d7..04b814e2b9 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -10,7 +10,7 @@ use back::lto; use back::link::{get_linker, remove}; -use rustc_incremental::save_trans_partition; +use rustc_incremental::{save_trans_partition, in_incr_comp_dir}; use session::config::{OutputFilenames, OutputTypes, Passes, SomePasses, AllPasses}; use session::Session; use session::config::{self, OutputType}; @@ -328,8 +328,9 @@ struct CodegenContext<'a> { remark: Passes, // Worker thread number worker: usize, - // Directory where incremental data is stored (if any) - incremental: Option, + // The incremental compilation session directory, or None if we are not + // compiling incrementally + incr_comp_session_dir: Option } impl<'a> CodegenContext<'a> { @@ -340,7 +341,7 @@ impl<'a> CodegenContext<'a> { plugin_passes: sess.plugin_llvm_passes.borrow().clone(), remark: sess.opts.cg.remark.clone(), worker: 0, - incremental: sess.opts.incremental.clone(), + incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()) } } } @@ -962,17 +963,20 @@ fn execute_work_item(cgcx: &CodegenContext, work_item.output_names); } ModuleSource::Preexisting(wp) => { - let incremental = cgcx.incremental.as_ref().unwrap(); + let incr_comp_session_dir = cgcx.incr_comp_session_dir + .as_ref() + .unwrap(); let name = &work_item.mtrans.name; for (kind, saved_file) in wp.saved_files { let obj_out = work_item.output_names.temp_path(kind, Some(name)); - let source_file = incremental.join(&saved_file); + let source_file = in_incr_comp_dir(&incr_comp_session_dir, + &saved_file); debug!("copying pre-existing module `{}` from {:?} to {}", work_item.mtrans.name, source_file, obj_out.display()); match link_or_copy(&source_file, &obj_out) { - Ok(()) => { } + Ok(_) => { } Err(err) => { cgcx.handler.err(&format!("unable to copy {} to {}: {}", source_file.display(), @@ -1018,7 +1022,7 @@ fn run_work_multithreaded(sess: &Session, let mut tx = Some(tx); futures.push(rx); - let incremental = sess.opts.incremental.clone(); + let incr_comp_session_dir = sess.incr_comp_session_dir_opt().map(|r| r.clone()); thread::Builder::new().name(format!("codegen-{}", i)).spawn(move || { let diag_handler = Handler::with_emitter(true, false, box diag_emitter); @@ -1031,7 +1035,7 @@ fn run_work_multithreaded(sess: &Session, plugin_passes: plugin_passes, remark: remark, worker: i, - incremental: incremental, + incr_comp_session_dir: incr_comp_session_dir }; loop { @@ -1126,10 +1130,10 @@ pub unsafe fn with_llvm_pmb(llmod: ModuleRef, // inline with lifetime intrinsics, and O2+ we add an inliner with a // thresholds copied from clang. match (opt_level, opt_size, inline_threshold) { - (_, _, Some(t)) => { + (.., Some(t)) => { llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, t as u32); } - (llvm::CodeGenOptLevel::Aggressive, _, _) => { + (llvm::CodeGenOptLevel::Aggressive, ..) 
=> { llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 275); } (_, llvm::CodeGenOptSizeDefault, _) => { @@ -1138,16 +1142,16 @@ pub unsafe fn with_llvm_pmb(llmod: ModuleRef, (_, llvm::CodeGenOptSizeAggressive, _) => { llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 25); } - (llvm::CodeGenOptLevel::None, _, _) => { + (llvm::CodeGenOptLevel::None, ..) => { llvm::LLVMRustAddAlwaysInlinePass(builder, false); } - (llvm::CodeGenOptLevel::Less, _, _) => { + (llvm::CodeGenOptLevel::Less, ..) => { llvm::LLVMRustAddAlwaysInlinePass(builder, true); } - (llvm::CodeGenOptLevel::Default, _, _) => { + (llvm::CodeGenOptLevel::Default, ..) => { llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 225); } - (llvm::CodeGenOptLevel::Other, _, _) => { + (llvm::CodeGenOptLevel::Other, ..) => { bug!("CodeGenOptLevel::Other selected") } } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 479a6a2cac..a2f2e9dd8d 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -33,13 +33,11 @@ use super::ModuleTranslation; use assert_module_sources; use back::link; use back::linker::LinkerInfo; -use llvm::{BasicBlockRef, Linkage, ValueRef, Vector, get_param}; +use llvm::{Linkage, ValueRef, Vector, get_param}; use llvm; -use rustc::cfg; use rustc::hir::def_id::DefId; use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem}; -use rustc::hir::pat_util::simple_name; -use rustc::ty::subst::{self, Substs}; +use rustc::ty::subst::Substs; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc::ty::adjustment::CustomCoerceUnsized; @@ -47,35 +45,27 @@ use rustc::dep_graph::{DepNode, WorkProduct}; use rustc::hir::map as hir_map; use rustc::util::common::time; use rustc::mir::mir_map::MirMap; -use rustc_data_structures::graph::OUTGOING; -use session::config::{self, NoDebugInfo, FullDebugInfo}; +use session::config::{self, NoDebugInfo}; +use rustc_incremental::IncrementalHashesMap; use session::Session; -use _match; use abi::{self, Abi, FnType}; use adt; use attributes; use build::*; use builder::{Builder, noname}; -use callee::{Callee, CallArgs, ArgExprs, ArgVals}; -use cleanup::{self, CleanupMethods, DropHint}; -use closure; -use common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_uint, C_integral}; +use callee::{Callee}; +use common::{Block, C_bool, C_bytes_in_context, C_i32, C_uint}; use collector::{self, TransItemCollectionMode}; use common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef}; -use common::{CrateContext, DropFlagHintsMap, Field, FunctionContext}; -use common::{Result, NodeIdAndSpan, VariantInfo}; -use common::{node_id_type, fulfill_obligation}; -use common::{type_is_immediate, type_is_zero_size, val_ty}; +use common::{CrateContext, FunctionContext}; +use common::{Result}; +use common::{fulfill_obligation}; +use common::{type_is_zero_size, val_ty}; use common; use consts; use context::{SharedCrateContext, CrateContextList}; -use controlflow; -use datum; -use debuginfo::{self, DebugLoc, ToDebugLoc}; +use debuginfo::{self, DebugLoc}; use declare; -use expr; -use glue; -use inline; use machine; use machine::{llalign_of_min, llsize_of}; use meth; @@ -85,30 +75,24 @@ use partitioning::{self, PartitioningStrategy, CodegenUnit}; use symbol_map::SymbolMap; use symbol_names_test; use trans_item::TransItem; -use tvec; use type_::Type; use type_of; use value::Value; use Disr; -use util::common::indenter; use util::sha2::Sha256; -use util::nodemap::{NodeMap, NodeSet, FnvHashSet}; +use 
util::nodemap::{NodeSet, FnvHashMap, FnvHashSet}; use arena::TypedArena; use libc::c_uint; use std::ffi::{CStr, CString}; use std::borrow::Cow; use std::cell::{Cell, RefCell}; -use std::collections::HashMap; use std::ptr; use std::rc::Rc; use std::str; -use std::{i8, i16, i32, i64}; +use std::i32; use syntax_pos::{Span, DUMMY_SP}; -use syntax::parse::token::InternedString; -use syntax::attr::AttrMetaMethods; use syntax::attr; -use rustc::hir::intravisit::{self, Visitor}; use rustc::hir; use syntax::ast; @@ -191,8 +175,12 @@ impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> { } } -pub fn kind_for_closure(ccx: &CrateContext, closure_id: DefId) -> ty::ClosureKind { - *ccx.tcx().tables.borrow().closure_kinds.get(&closure_id).unwrap() +pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef { + StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA) +} + +pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef { + StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR) } fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId { @@ -218,8 +206,8 @@ pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Allocate space: let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem); - let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().mk_substs(Substs::empty())) - .call(bcx, debug_loc, ArgVals(&[size, align]), None); + let r = Callee::def(bcx.ccx(), def_id, Substs::empty(bcx.tcx())) + .call(bcx, debug_loc, &[size, align], None); Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr)) } @@ -403,154 +391,6 @@ pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty) } -// Iterates through the elements of a structural type. -pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>, - av: ValueRef, - t: Ty<'tcx>, - mut f: F) - -> Block<'blk, 'tcx> - where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx> -{ - let _icx = push_ctxt("iter_structural_ty"); - - fn iter_variant<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>, - repr: &adt::Repr<'tcx>, - av: adt::MaybeSizedValue, - variant: ty::VariantDef<'tcx>, - substs: &Substs<'tcx>, - f: &mut F) - -> Block<'blk, 'tcx> - where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx> - { - let _icx = push_ctxt("iter_variant"); - let tcx = cx.tcx(); - let mut cx = cx; - - for (i, field) in variant.fields.iter().enumerate() { - let arg = monomorphize::field_ty(tcx, substs, field); - cx = f(cx, - adt::trans_field_ptr(cx, repr, av, Disr::from(variant.disr_val), i), - arg); - } - return cx; - } - - let value = if common::type_is_sized(cx.tcx(), t) { - adt::MaybeSizedValue::sized(av) - } else { - let data = Load(cx, expr::get_dataptr(cx, av)); - let info = Load(cx, expr::get_meta(cx, av)); - adt::MaybeSizedValue::unsized_(data, info) - }; - - let mut cx = cx; - match t.sty { - ty::TyStruct(..) 
=> { - let repr = adt::represent_type(cx.ccx(), t); - let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None); - for (i, &Field(_, field_ty)) in fields.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr::from(discr), i); - - let val = if common::type_is_sized(cx.tcx(), field_ty) { - llfld_a - } else { - let scratch = datum::rvalue_scratch_datum(cx, field_ty, "__fat_ptr_iter"); - Store(cx, llfld_a, expr::get_dataptr(cx, scratch.val)); - Store(cx, value.meta, expr::get_meta(cx, scratch.val)); - scratch.val - }; - cx = f(cx, val, field_ty); - } - } - ty::TyClosure(_, ref substs) => { - let repr = adt::represent_type(cx.ccx(), t); - for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() { - let llupvar = adt::trans_field_ptr(cx, &repr, value, Disr(0), i); - cx = f(cx, llupvar, upvar_ty); - } - } - ty::TyArray(_, n) => { - let (base, len) = tvec::get_fixed_base_and_len(cx, value.value, n); - let unit_ty = t.sequence_element_type(cx.tcx()); - cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f); - } - ty::TySlice(_) | ty::TyStr => { - let unit_ty = t.sequence_element_type(cx.tcx()); - cx = tvec::iter_vec_raw(cx, value.value, unit_ty, value.meta, f); - } - ty::TyTuple(ref args) => { - let repr = adt::represent_type(cx.ccx(), t); - for (i, arg) in args.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr(0), i); - cx = f(cx, llfld_a, *arg); - } - } - ty::TyEnum(en, substs) => { - let fcx = cx.fcx; - let ccx = fcx.ccx; - - let repr = adt::represent_type(ccx, t); - let n_variants = en.variants.len(); - - // NB: we must hit the discriminant first so that structural - // comparison know not to proceed when the discriminants differ. - - match adt::trans_switch(cx, &repr, av, false) { - (_match::Single, None) => { - if n_variants != 0 { - assert!(n_variants == 1); - cx = iter_variant(cx, &repr, adt::MaybeSizedValue::sized(av), - &en.variants[0], substs, &mut f); - } - } - (_match::Switch, Some(lldiscrim_a)) => { - cx = f(cx, lldiscrim_a, cx.tcx().types.isize); - - // Create a fall-through basic block for the "else" case of - // the switch instruction we're about to generate. Note that - // we do **not** use an Unreachable instruction here, even - // though most of the time this basic block will never be hit. - // - // When an enum is dropped it's contents are currently - // overwritten to DTOR_DONE, which means the discriminant - // could have changed value to something not within the actual - // range of the discriminant. Currently this function is only - // used for drop glue so in this case we just return quickly - // from the outer function, and any other use case will only - // call this for an already-valid enum in which case the `ret - // void` will never be hit. 
- let ret_void_cx = fcx.new_temp_block("enum-iter-ret-void"); - RetVoid(ret_void_cx, DebugLoc::None); - let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants); - let next_cx = fcx.new_temp_block("enum-iter-next"); - - for variant in &en.variants { - let variant_cx = fcx.new_temp_block(&format!("enum-iter-variant-{}", - &variant.disr_val - .to_string())); - let case_val = adt::trans_case(cx, &repr, Disr::from(variant.disr_val)); - AddCase(llswitch, case_val, variant_cx.llbb); - let variant_cx = iter_variant(variant_cx, - &repr, - value, - variant, - substs, - &mut f); - Br(variant_cx, next_cx.llbb, DebugLoc::None); - } - cx = next_cx; - } - _ => ccx.sess().unimpl("value from adt::trans_switch in iter_structural_ty"), - } - } - _ => { - cx.sess().unimpl(&format!("type in iter_structural_ty: {}", t)) - } - } - return cx; -} - - /// Retrieve the information we are losing (making dynamic) in an unsizing /// adjustment. /// @@ -571,14 +411,9 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>, // change to the vtable. old_info.expect("unsized_info: missing old info for trait upcast") } - (_, &ty::TyTrait(box ty::TraitTy { ref principal, .. })) => { - // Note that we preserve binding levels here: - let substs = principal.0.substs.with_self_ty(source).erase_regions(); - let substs = ccx.tcx().mk_substs(substs); - let trait_ref = ty::Binder(ty::TraitRef { - def_id: principal.def_id(), - substs: substs, - }); + (_, &ty::TyTrait(ref data)) => { + let trait_ref = data.principal.with_self_ty(ccx.tcx(), source); + let trait_ref = ccx.tcx().erase_regions(&trait_ref); consts::ptrcast(meth::get_vtable(ccx, trait_ref), Type::vtable_ptr(ccx)) } @@ -640,33 +475,27 @@ pub fn coerce_unsized_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, store_fat_ptr(bcx, base, info, dst, dst_ty); } - // This can be extended to enums and tuples in the future. 
- // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) | - (&ty::TyStruct(def_a, _), &ty::TyStruct(def_b, _)) => { + (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => { assert_eq!(def_a, def_b); - let src_repr = adt::represent_type(bcx.ccx(), src_ty); - let src_fields = match &*src_repr { - &adt::Repr::Univariant(ref s, _) => &s.fields, - _ => bug!("struct has non-univariant repr"), - }; - let dst_repr = adt::represent_type(bcx.ccx(), dst_ty); - let dst_fields = match &*dst_repr { - &adt::Repr::Univariant(ref s, _) => &s.fields, - _ => bug!("struct has non-univariant repr"), - }; + let src_fields = def_a.variants[0].fields.iter().map(|f| { + monomorphize::field_ty(bcx.tcx(), substs_a, f) + }); + let dst_fields = def_b.variants[0].fields.iter().map(|f| { + monomorphize::field_ty(bcx.tcx(), substs_b, f) + }); let src = adt::MaybeSizedValue::sized(src); let dst = adt::MaybeSizedValue::sized(dst); - let iter = src_fields.iter().zip(dst_fields).enumerate(); + let iter = src_fields.zip(dst_fields).enumerate(); for (i, (src_fty, dst_fty)) in iter { if type_is_zero_size(bcx.ccx(), dst_fty) { continue; } - let src_f = adt::trans_field_ptr(bcx, &src_repr, src, Disr(0), i); - let dst_f = adt::trans_field_ptr(bcx, &dst_repr, dst, Disr(0), i); + let src_f = adt::trans_field_ptr(bcx, src_ty, src, Disr(0), i); + let dst_f = adt::trans_field_ptr(bcx, dst_ty, dst, Disr(0), i); if src_fty == dst_fty { memcpy_ty(bcx, dst_f, src_f, src_fty); } else { @@ -684,14 +513,9 @@ pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx source_ty: Ty<'tcx>, target_ty: Ty<'tcx>) -> CustomCoerceUnsized { - let trait_substs = Substs::new(subst::VecPerParamSpace::new(vec![target_ty], - vec![source_ty], - Vec::new()), - subst::VecPerParamSpace::empty()); - let trait_ref = ty::Binder(ty::TraitRef { def_id: scx.tcx().lang_items.coerce_unsized_trait().unwrap(), - substs: scx.tcx().mk_substs(trait_substs) + substs: Substs::new_trait(scx.tcx(), source_ty, &[target_ty]) }); match fulfill_obligation(scx, DUMMY_SP, trait_ref) { @@ -751,101 +575,6 @@ fn cast_shift_rhs(op: hir::BinOp_, } } -pub fn llty_and_min_for_signed_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - val_t: Ty<'tcx>) - -> (Type, u64) { - match val_t.sty { - ty::TyInt(t) => { - let llty = Type::int_from_ty(cx.ccx(), t); - let min = match t { - ast::IntTy::Is if llty == Type::i32(cx.ccx()) => i32::MIN as u64, - ast::IntTy::Is => i64::MIN as u64, - ast::IntTy::I8 => i8::MIN as u64, - ast::IntTy::I16 => i16::MIN as u64, - ast::IntTy::I32 => i32::MIN as u64, - ast::IntTy::I64 => i64::MIN as u64, - }; - (llty, min) - } - _ => bug!(), - } -} - -pub fn fail_if_zero_or_overflows<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - call_info: NodeIdAndSpan, - divrem: hir::BinOp, - lhs: ValueRef, - rhs: ValueRef, - rhs_t: Ty<'tcx>) - -> Block<'blk, 'tcx> { - use rustc_const_math::{ConstMathErr, Op}; - - let (zero_err, overflow_err) = if divrem.node == hir::BiDiv { - (ConstMathErr::DivisionByZero, ConstMathErr::Overflow(Op::Div)) - } else { - (ConstMathErr::RemainderByZero, ConstMathErr::Overflow(Op::Rem)) - }; - let debug_loc = call_info.debug_loc(); - - let (is_zero, is_signed) = match rhs_t.sty { - ty::TyInt(t) => { - let zero = C_integral(Type::int_from_ty(cx.ccx(), t), 0, false); - (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), true) - } - ty::TyUint(t) => { - let zero = C_integral(Type::uint_from_ty(cx.ccx(), t), 0, false); - (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), false) - } - ty::TyStruct(def, _) if def.is_simd() => { - let mut res = C_bool(cx.ccx(), 
false); - for i in 0..rhs_t.simd_size(cx.tcx()) { - res = Or(cx, - res, - IsNull(cx, ExtractElement(cx, rhs, C_int(cx.ccx(), i as i64))), - debug_loc); - } - (res, false) - } - _ => { - bug!("fail-if-zero on unexpected type: {}", rhs_t); - } - }; - let bcx = with_cond(cx, is_zero, |bcx| { - controlflow::trans_fail(bcx, call_info, InternedString::new(zero_err.description())) - }); - - // To quote LLVM's documentation for the sdiv instruction: - // - // Division by zero leads to undefined behavior. Overflow also leads - // to undefined behavior; this is a rare case, but can occur, for - // example, by doing a 32-bit division of -2147483648 by -1. - // - // In order to avoid undefined behavior, we perform runtime checks for - // signed division/remainder which would trigger overflow. For unsigned - // integers, no action beyond checking for zero need be taken. - if is_signed { - let (llty, min) = llty_and_min_for_signed_ty(cx, rhs_t); - let minus_one = ICmp(bcx, - llvm::IntEQ, - rhs, - C_integral(llty, !0, false), - debug_loc); - with_cond(bcx, minus_one, |bcx| { - let is_min = ICmp(bcx, - llvm::IntEQ, - lhs, - C_integral(llty, min, true), - debug_loc); - with_cond(bcx, is_min, |bcx| { - controlflow::trans_fail(bcx, call_info, - InternedString::new(overflow_err.description())) - }) - }) - } else { - bcx - } -} - pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, llfn: ValueRef, llargs: &[ValueRef], @@ -856,21 +585,12 @@ pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, return (C_null(Type::i8(bcx.ccx())), bcx); } - match bcx.opt_node_id { - None => { - debug!("invoke at ???"); - } - Some(id) => { - debug!("invoke at {}", bcx.tcx().map.node_to_string(id)); - } - } - if need_invoke(bcx) { debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb); for &llarg in llargs { debug!("arg: {:?}", Value(llarg)); } - let normal_bcx = bcx.fcx.new_temp_block("normal-return"); + let normal_bcx = bcx.fcx.new_block("normal-return"); let landing_pad = bcx.fcx.get_landing_pad(); let llresult = Invoke(bcx, @@ -912,14 +632,6 @@ pub fn need_invoke(bcx: Block) -> bool { } } -pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>) -> ValueRef { - let _icx = push_ctxt("load_if_immediate"); - if type_is_immediate(cx.ccx(), t) { - return load_ty(cx, v, t); - } - return v; -} - /// Helper for loading values from memory. Does the necessary conversion if the in-memory type /// differs from the type used for SSA values. Also handles various special cases where the type /// gives us better information about what we are loading. 
@@ -975,10 +687,10 @@ pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t if common::type_is_fat_ptr(cx.tcx(), t) { Store(cx, ExtractValue(cx, v, abi::FAT_PTR_ADDR), - expr::get_dataptr(cx, dst)); + get_dataptr(cx, dst)); Store(cx, ExtractValue(cx, v, abi::FAT_PTR_EXTRA), - expr::get_meta(cx, dst)); + get_meta(cx, dst)); } else { Store(cx, from_immediate(cx, v), dst); } @@ -990,8 +702,8 @@ pub fn store_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, dst: ValueRef, _ty: Ty<'tcx>) { // FIXME: emit metadata - Store(cx, data, expr::get_dataptr(cx, dst)); - Store(cx, extra, expr::get_meta(cx, dst)); + Store(cx, data, get_dataptr(cx, dst)); + Store(cx, extra, get_meta(cx, dst)); } pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, @@ -999,8 +711,8 @@ pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, _ty: Ty<'tcx>) -> (ValueRef, ValueRef) { // FIXME: emit metadata - (Load(cx, expr::get_dataptr(cx, src)), - Load(cx, expr::get_meta(cx, src))) + (Load(cx, get_dataptr(cx, src)), + Load(cx, get_meta(cx, src))) } pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef { @@ -1019,19 +731,6 @@ pub fn to_immediate(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef { } } -pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &hir::Local) -> Block<'blk, 'tcx> { - debug!("init_local(bcx={}, local.id={})", bcx.to_str(), local.id); - let _indenter = indenter(); - let _icx = push_ctxt("init_local"); - _match::store_local(bcx, local) -} - -pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>, - llbb: BasicBlockRef) - -> Block<'blk, 'tcx> { - common::BlockS::new(llbb, None, fcx) -} - pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx> where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx> { @@ -1042,8 +741,8 @@ pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> } let fcx = bcx.fcx; - let next_cx = fcx.new_temp_block("next"); - let cond_cx = fcx.new_temp_block("cond"); + let next_cx = fcx.new_block("next"); + let cond_cx = fcx.new_block("cond"); CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None); let after_cx = f(cond_cx); if !after_cx.terminated.get() { @@ -1117,7 +816,7 @@ pub fn trans_unwind_resume(bcx: Block, lpval: ValueRef) { } else { let exc_ptr = ExtractValue(bcx, lpval, 0); bcx.fcx.eh_unwind_resume() - .call(bcx, DebugLoc::None, ArgVals(&[exc_ptr]), None); + .call(bcx, DebugLoc::None, &[exc_ptr], None); } } @@ -1160,15 +859,6 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRe } } -pub fn drop_done_fill_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { - if cx.unreachable.get() { - return; - } - let _icx = push_ctxt("drop_done_fill_mem"); - let bcx = cx; - memfill(&B(bcx), llptr, t, adt::DTOR_DONE); -} - pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { if cx.unreachable.get() { return; @@ -1208,82 +898,11 @@ pub fn call_memset<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); } - -/// In general, when we create an scratch value in an alloca, the -/// creator may not know if the block (that initializes the scratch -/// with the desired value) actually dominates the cleanup associated -/// with the scratch value. -/// -/// To deal with this, when we do an alloca (at the *start* of whole -/// function body), we optionally can also set the associated -/// dropped-flag state of the alloca to "dropped." 
-#[derive(Copy, Clone, Debug)] -pub enum InitAlloca { - /// Indicates that the state should have its associated drop flag - /// set to "dropped" at the point of allocation. - Dropped, - /// Indicates the value of the associated drop flag is irrelevant. - /// The embedded string literal is a programmer provided argument - /// for why. This is a safeguard forcing compiler devs to - /// document; it might be a good idea to also emit this as a - /// comment with the alloca itself when emitting LLVM output.ll. - Uninit(&'static str), -} - - pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - t: Ty<'tcx>, + ty: Ty<'tcx>, name: &str) -> ValueRef { - // pnkfelix: I do not know why alloc_ty meets the assumptions for - // passing Uninit, but it was never needed (even back when we had - // the original boolean `zero` flag on `lvalue_scratch_datum`). - alloc_ty_init(bcx, t, InitAlloca::Uninit("all alloc_ty are uninit"), name) -} - -/// This variant of `fn alloc_ty` does not necessarily assume that the -/// alloca should be created with no initial value. Instead the caller -/// controls that assumption via the `init` flag. -/// -/// Note that if the alloca *is* initialized via `init`, then we will -/// also inject an `llvm.lifetime.start` before that initialization -/// occurs, and thus callers should not call_lifetime_start -/// themselves. But if `init` says "uninitialized", then callers are -/// in charge of choosing where to call_lifetime_start and -/// subsequently populate the alloca. -/// -/// (See related discussion on PR #30823.) -pub fn alloc_ty_init<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - t: Ty<'tcx>, - init: InitAlloca, - name: &str) -> ValueRef { - let _icx = push_ctxt("alloc_ty"); - let ccx = bcx.ccx(); - let ty = type_of::type_of(ccx, t); - assert!(!t.has_param_types()); - match init { - InitAlloca::Dropped => alloca_dropped(bcx, t, name), - InitAlloca::Uninit(_) => alloca(bcx, ty, name), - } -} - -pub fn alloca_dropped<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef { - let _icx = push_ctxt("alloca_dropped"); - let llty = type_of::type_of(cx.ccx(), ty); - if cx.unreachable.get() { - unsafe { return llvm::LLVMGetUndef(llty.ptr_to().to_ref()); } - } - let p = alloca(cx, llty, name); - let b = cx.fcx.ccx.builder(); - b.position_before(cx.fcx.alloca_insert_pt.get().unwrap()); - - // This is just like `call_lifetime_start` (but latter expects a - // Block, which we do not have for `alloca_insert_pt`). - core_lifetime_emit(cx.ccx(), p, Lifetime::Start, |ccx, size, lifetime_start| { - let ptr = b.pointercast(p, Type::i8p(ccx)); - b.call(lifetime_start, &[C_u64(ccx, size), ptr], None); - }); - memfill(&b, p, ty, adt::DTOR_DONE); - p + assert!(!ty.has_param_types()); + alloca(bcx, type_of::type_of(bcx.ccx(), ty), name) } pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef { @@ -1297,121 +916,6 @@ pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef { Alloca(cx, ty, name) } -pub fn set_value_name(val: ValueRef, name: &str) { - unsafe { - let name = CString::new(name).unwrap(); - llvm::LLVMSetValueName(val, name.as_ptr()); - } -} - -struct FindNestedReturn { - found: bool, -} - -impl FindNestedReturn { - fn new() -> FindNestedReturn { - FindNestedReturn { - found: false, - } - } -} - -impl<'v> Visitor<'v> for FindNestedReturn { - fn visit_expr(&mut self, e: &hir::Expr) { - match e.node { - hir::ExprRet(..) 
=> { - self.found = true; - } - _ => intravisit::walk_expr(self, e), - } - } -} - -fn build_cfg<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: ast::NodeId) - -> (ast::NodeId, Option) { - let blk = match tcx.map.find(id) { - Some(hir_map::NodeItem(i)) => { - match i.node { - hir::ItemFn(_, _, _, _, _, ref blk) => { - blk - } - _ => bug!("unexpected item variant in has_nested_returns"), - } - } - Some(hir_map::NodeTraitItem(trait_item)) => { - match trait_item.node { - hir::MethodTraitItem(_, Some(ref body)) => body, - _ => { - bug!("unexpected variant: trait item other than a provided method in \ - has_nested_returns") - } - } - } - Some(hir_map::NodeImplItem(impl_item)) => { - match impl_item.node { - hir::ImplItemKind::Method(_, ref body) => body, - _ => { - bug!("unexpected variant: non-method impl item in has_nested_returns") - } - } - } - Some(hir_map::NodeExpr(e)) => { - match e.node { - hir::ExprClosure(_, _, ref blk, _) => blk, - _ => bug!("unexpected expr variant in has_nested_returns"), - } - } - Some(hir_map::NodeVariant(..)) | - Some(hir_map::NodeStructCtor(..)) => return (ast::DUMMY_NODE_ID, None), - - // glue, shims, etc - None if id == ast::DUMMY_NODE_ID => return (ast::DUMMY_NODE_ID, None), - - _ => bug!("unexpected variant in has_nested_returns: {}", - tcx.node_path_str(id)), - }; - - (blk.id, Some(cfg::CFG::new(tcx, blk))) -} - -// Checks for the presence of "nested returns" in a function. -// Nested returns are when the inner expression of a return expression -// (the 'expr' in 'return expr') contains a return expression. Only cases -// where the outer return is actually reachable are considered. Implicit -// returns from the end of blocks are considered as well. -// -// This check is needed to handle the case where the inner expression is -// part of a larger expression that may have already partially-filled the -// return slot alloca. This can cause errors related to clean-up due to -// the clobbering of the existing value in the return slot. -fn has_nested_returns(tcx: TyCtxt, cfg: &cfg::CFG, blk_id: ast::NodeId) -> bool { - for index in cfg.graph.depth_traverse(cfg.entry, OUTGOING) { - let n = cfg.graph.node_data(index); - match tcx.map.find(n.id()) { - Some(hir_map::NodeExpr(ex)) => { - if let hir::ExprRet(Some(ref ret_expr)) = ex.node { - let mut visitor = FindNestedReturn::new(); - intravisit::walk_expr(&mut visitor, &ret_expr); - if visitor.found { - return true; - } - } - } - Some(hir_map::NodeBlock(blk)) if blk.id == blk_id => { - let mut visitor = FindNestedReturn::new(); - walk_list!(&mut visitor, visit_expr, &blk.expr); - if visitor.found { - return true; - } - } - _ => {} - } - } - - return false; -} - impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// Create a function context for the given function. 
/// Beware that you must call `fcx.init` or `fcx.bind_args` @@ -1419,15 +923,15 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { pub fn new(ccx: &'blk CrateContext<'blk, 'tcx>, llfndecl: ValueRef, fn_ty: FnType, - definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi, ast::NodeId)>, + definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>, block_arena: &'blk TypedArena>) -> FunctionContext<'blk, 'tcx> { - let (param_substs, def_id, inlined_id) = match definition { - Some((instance, _, _, inlined_id)) => { + let (param_substs, def_id) = match definition { + Some((instance, ..)) => { common::validate_substs(instance.substs); - (instance.substs, Some(instance.def), Some(inlined_id)) + (instance.substs, Some(instance.def)) } - None => (ccx.tcx().mk_substs(Substs::empty()), None, None) + None => (Substs::empty(ccx.tcx()), None) }; let local_id = def_id.and_then(|id| ccx.tcx().map.as_local_node_id(id)); @@ -1435,70 +939,47 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { debug!("FunctionContext::new({})", definition.map_or(String::new(), |d| d.0.to_string())); - let cfg = inlined_id.map(|id| build_cfg(ccx.tcx(), id)); - let nested_returns = if let Some((blk_id, Some(ref cfg))) = cfg { - has_nested_returns(ccx.tcx(), cfg, blk_id) - } else { - false - }; - - let check_attrs = |attrs: &[ast::Attribute]| { - let default_to_mir = ccx.sess().opts.debugging_opts.orbit; - let invert = if default_to_mir { "rustc_no_mir" } else { "rustc_mir" }; - (default_to_mir ^ attrs.iter().any(|item| item.check_name(invert)), - attrs.iter().any(|item| item.check_name("no_debug"))) - }; - - let (use_mir, no_debug) = if let Some(id) = local_id { - check_attrs(ccx.tcx().map.attrs(id)) + let no_debug = if let Some(id) = local_id { + ccx.tcx().map.attrs(id) + .iter().any(|item| item.check_name("no_debug")) } else if let Some(def_id) = def_id { - check_attrs(&ccx.sess().cstore.item_attrs(def_id)) + ccx.sess().cstore.item_attrs(def_id) + .iter().any(|item| item.check_name("no_debug")) } else { - check_attrs(&[]) + false }; - let mir = if use_mir { - def_id.and_then(|id| ccx.get_mir(id)) - } else { - None - }; + let mir = def_id.and_then(|id| ccx.get_mir(id)); - let debug_context = if let (false, Some(definition)) = (no_debug, definition) { - let (instance, sig, abi, _) = definition; - debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfndecl) + let debug_context = if let (false, Some((instance, sig, abi)), &Some(ref mir)) = + (no_debug, definition, &mir) { + debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfndecl, mir) } else { debuginfo::empty_function_debug_context(ccx) }; FunctionContext { - needs_ret_allocas: nested_returns && mir.is_none(), mir: mir, llfn: llfndecl, llretslotptr: Cell::new(None), param_env: ccx.tcx().empty_parameter_environment(), alloca_insert_pt: Cell::new(None), - llreturn: Cell::new(None), landingpad_alloca: Cell::new(None), - lllocals: RefCell::new(NodeMap()), - llupvars: RefCell::new(NodeMap()), - lldropflag_hints: RefCell::new(DropFlagHintsMap::new()), fn_ty: fn_ty, param_substs: param_substs, - span: inlined_id.and_then(|id| ccx.tcx().map.opt_span(id)), + span: None, block_arena: block_arena, lpad_arena: TypedArena::new(), ccx: ccx, debug_context: debug_context, scopes: RefCell::new(Vec::new()), - cfg: cfg.and_then(|(_, cfg)| cfg) } } /// Performs setup on a newly created function, creating the entry /// scope block and allocating space for the return pointer. 
- pub fn init(&'blk self, skip_retptr: bool, fn_did: Option) - -> Block<'blk, 'tcx> { - let entry_bcx = self.new_temp_block("entry-block"); + pub fn init(&'blk self, skip_retptr: bool) -> Block<'blk, 'tcx> { + let entry_bcx = self.new_block("entry-block"); // Use a dummy instruction as the insertion point for all allocas. // This is later removed in FunctionContext::cleanup. @@ -1516,244 +997,26 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { // which will hold the pointer to the right alloca which has the // final ret value let llty = self.fn_ty.ret.memory_ty(self.ccx); - let slot = if self.needs_ret_allocas { - // Let's create the stack slot - let slot = AllocaFcx(self, llty.ptr_to(), "llretslotptr"); - - // and if we're using an out pointer, then store that in our newly made slot - if self.fn_ty.ret.is_indirect() { - let outptr = get_param(self.llfn, 0); - - let b = self.ccx.builder(); - b.position_before(self.alloca_insert_pt.get().unwrap()); - b.store(outptr, slot); - } - - slot + // But if there are no nested returns, we skip the indirection + // and have a single retslot + let slot = if self.fn_ty.ret.is_indirect() { + get_param(self.llfn, 0) } else { - // But if there are no nested returns, we skip the indirection - // and have a single retslot - if self.fn_ty.ret.is_indirect() { - get_param(self.llfn, 0) - } else { - AllocaFcx(self, llty, "sret_slot") - } + AllocaFcx(self, llty, "sret_slot") }; self.llretslotptr.set(Some(slot)); } - // Create the drop-flag hints for every unfragmented path in the function. - let tcx = self.ccx.tcx(); - let tables = tcx.tables.borrow(); - let mut hints = self.lldropflag_hints.borrow_mut(); - let fragment_infos = tcx.fragment_infos.borrow(); - - // Intern table for drop-flag hint datums. - let mut seen = HashMap::new(); - - let fragment_infos = fn_did.and_then(|did| fragment_infos.get(&did)); - if let Some(fragment_infos) = fragment_infos { - for &info in fragment_infos { - - let make_datum = |id| { - let init_val = C_u8(self.ccx, adt::DTOR_NEEDED_HINT); - let llname = &format!("dropflag_hint_{}", id); - debug!("adding hint {}", llname); - let ty = tcx.types.u8; - let ptr = alloc_ty(entry_bcx, ty, llname); - Store(entry_bcx, init_val, ptr); - let flag = datum::Lvalue::new_dropflag_hint("FunctionContext::init"); - datum::Datum::new(ptr, ty, flag) - }; - - let (var, datum) = match info { - ty::FragmentInfo::Moved { var, .. } | - ty::FragmentInfo::Assigned { var, .. } => { - let opt_datum = seen.get(&var).cloned().unwrap_or_else(|| { - let ty = tables.node_types[&var]; - if self.type_needs_drop(ty) { - let datum = make_datum(var); - seen.insert(var, Some(datum.clone())); - Some(datum) - } else { - // No drop call needed, so we don't need a dropflag hint - None - } - }); - if let Some(datum) = opt_datum { - (var, datum) - } else { - continue - } - } - }; - match info { - ty::FragmentInfo::Moved { move_expr: expr_id, .. } => { - debug!("FragmentInfo::Moved insert drop hint for {}", expr_id); - hints.insert(expr_id, DropHint::new(var, datum)); - } - ty::FragmentInfo::Assigned { assignee_id: expr_id, .. } => { - debug!("FragmentInfo::Assigned insert drop hint for {}", expr_id); - hints.insert(expr_id, DropHint::new(var, datum)); - } - } - } - } - entry_bcx } - /// Creates lvalue datums for each of the incoming function arguments, - /// matches all argument patterns against them to produce bindings, - /// and returns the entry block (see FunctionContext::init). 
- fn bind_args(&'blk self, - args: &[hir::Arg], - abi: Abi, - id: ast::NodeId, - closure_env: closure::ClosureEnv, - arg_scope: cleanup::CustomScopeIndex) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("FunctionContext::bind_args"); - let fn_did = self.ccx.tcx().map.local_def_id(id); - let mut bcx = self.init(false, Some(fn_did)); - let arg_scope_id = cleanup::CustomScope(arg_scope); - - let mut idx = 0; - let mut llarg_idx = self.fn_ty.ret.is_indirect() as usize; - - let has_tupled_arg = match closure_env { - closure::ClosureEnv::NotClosure => abi == Abi::RustCall, - closure::ClosureEnv::Closure(..) => { - closure_env.load(bcx, arg_scope_id); - let env_arg = &self.fn_ty.args[idx]; - idx += 1; - if env_arg.pad.is_some() { - llarg_idx += 1; - } - if !env_arg.is_ignore() { - llarg_idx += 1; - } - false - } - }; - let tupled_arg_id = if has_tupled_arg { - args[args.len() - 1].id - } else { - ast::DUMMY_NODE_ID - }; - - // Return an array wrapping the ValueRefs that we get from `get_param` for - // each argument into datums. - // - // For certain mode/type combinations, the raw llarg values are passed - // by value. However, within the fn body itself, we want to always - // have all locals and arguments be by-ref so that we can cancel the - // cleanup and for better interaction with LLVM's debug info. So, if - // the argument would be passed by value, we store it into an alloca. - // This alloca should be optimized away by LLVM's mem-to-reg pass in - // the event it's not truly needed. - let uninit_reason = InitAlloca::Uninit("fn_arg populate dominates dtor"); - for hir_arg in args { - let arg_ty = node_id_type(bcx, hir_arg.id); - let arg_datum = if hir_arg.id != tupled_arg_id { - let arg = &self.fn_ty.args[idx]; - idx += 1; - if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo { - // Don't copy an indirect argument to an alloca, the caller - // already put it in a temporary alloca and gave it up, unless - // we emit extra-debug-info, which requires local allocas :(. - let llarg = get_param(self.llfn, llarg_idx as c_uint); - llarg_idx += 1; - self.schedule_lifetime_end(arg_scope_id, llarg); - self.schedule_drop_mem(arg_scope_id, llarg, arg_ty, None); - - datum::Datum::new(llarg, - arg_ty, - datum::Lvalue::new("FunctionContext::bind_args")) - } else { - unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "", - uninit_reason, - arg_scope_id, |bcx, dst| { - debug!("FunctionContext::bind_args: {:?}: {:?}", hir_arg, arg_ty); - let b = &bcx.build(); - if common::type_is_fat_ptr(bcx.tcx(), arg_ty) { - let meta = &self.fn_ty.args[idx]; - idx += 1; - arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, dst)); - meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, dst)); - } else { - arg.store_fn_arg(b, &mut llarg_idx, dst); - } - bcx - })) - } - } else { - // FIXME(pcwalton): Reduce the amount of code bloat this is responsible for. 
- let tupled_arg_tys = match arg_ty.sty { - ty::TyTuple(ref tys) => tys, - _ => bug!("last argument of `rust-call` fn isn't a tuple?!") - }; - - unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, - arg_ty, - "tupled_args", - uninit_reason, - arg_scope_id, - |bcx, llval| { - debug!("FunctionContext::bind_args: tupled {:?}: {:?}", hir_arg, arg_ty); - for (j, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { - let dst = StructGEP(bcx, llval, j); - let arg = &self.fn_ty.args[idx]; - idx += 1; - let b = &bcx.build(); - if common::type_is_fat_ptr(bcx.tcx(), tupled_arg_ty) { - let meta = &self.fn_ty.args[idx]; - idx += 1; - arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, dst)); - meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, dst)); - } else { - arg.store_fn_arg(b, &mut llarg_idx, dst); - } - } - bcx - })) - }; - - let pat = &hir_arg.pat; - bcx = if let Some(name) = simple_name(pat) { - // Generate nicer LLVM for the common case of fn a pattern - // like `x: T` - set_value_name(arg_datum.val, &bcx.name(name)); - self.lllocals.borrow_mut().insert(pat.id, arg_datum); - bcx - } else { - // General path. Copy out the values that are used in the - // pattern. - _match::bind_irrefutable_pat(bcx, pat, arg_datum.match_input(), arg_scope_id) - }; - debuginfo::create_argument_metadata(bcx, hir_arg); - } - - bcx - } - /// Ties up the llstaticallocas -> llloadenv -> lltop edges, /// and builds the return block. - pub fn finish(&'blk self, last_bcx: Block<'blk, 'tcx>, + pub fn finish(&'blk self, ret_cx: Block<'blk, 'tcx>, ret_debug_loc: DebugLoc) { let _icx = push_ctxt("FunctionContext::finish"); - let ret_cx = match self.llreturn.get() { - Some(llreturn) => { - if !last_bcx.terminated.get() { - Br(last_bcx, llreturn, DebugLoc::None); - } - raw_block(self, llreturn) - } - None => last_bcx, - }; - self.build_return_block(ret_cx, ret_debug_loc); DebugLoc::None.apply(self); @@ -1765,15 +1028,11 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { ret_debug_location: DebugLoc) { if self.llretslotptr.get().is_none() || ret_cx.unreachable.get() || - (!self.needs_ret_allocas && self.fn_ty.ret.is_indirect()) { + self.fn_ty.ret.is_indirect() { return RetVoid(ret_cx, ret_debug_location); } - let retslot = if self.needs_ret_allocas { - Load(ret_cx, self.llretslotptr.get().unwrap()) - } else { - self.llretslotptr.get().unwrap() - }; + let retslot = self.llretslotptr.get().unwrap(); let retptr = Value(retslot); let llty = self.fn_ty.ret.original_ty; match (retptr.get_dominating_store(ret_cx), self.fn_ty.ret.cast) { @@ -1832,14 +1091,10 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// /// If the function closes over its environment a closure will be returned. 
pub fn trans_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - decl: &hir::FnDecl, - body: &hir::Block, llfndecl: ValueRef, instance: Instance<'tcx>, - inlined_id: ast::NodeId, sig: &ty::FnSig<'tcx>, - abi: Abi, - closure_env: closure::ClosureEnv) { + abi: Abi) { ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1); let _icx = push_ctxt("trans_closure"); @@ -1859,228 +1114,75 @@ pub fn trans_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fcx = FunctionContext::new(ccx, llfndecl, fn_ty, - Some((instance, sig, abi, inlined_id)), + Some((instance, sig, abi)), &arena); - if fcx.mir.is_some() { - return mir::trans_mir(&fcx); - } - - debuginfo::fill_scope_map_for_function(&fcx, decl, body, inlined_id); - - // cleanup scope for the incoming arguments - let fn_cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node( - ccx, inlined_id, body.span, true); - let arg_scope = fcx.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc); - - // Set up arguments to the function. - debug!("trans_closure: function: {:?}", Value(fcx.llfn)); - let bcx = fcx.bind_args(&decl.inputs, abi, inlined_id, closure_env, arg_scope); - - // Up until here, IR instructions for this function have explicitly not been annotated with - // source code location, so we don't step into call setup code. From here on, source location - // emitting should be enabled. - debuginfo::start_emitting_source_locations(&fcx); - - let dest = if fcx.fn_ty.ret.is_ignore() { - expr::Ignore - } else { - expr::SaveIn(fcx.get_ret_slot(bcx, "iret_slot")) - }; - - // This call to trans_block is the place where we bridge between - // translation calls that don't have a return value (trans_crate, - // trans_mod, trans_item, et cetera) and those that do - // (trans_block, trans_expr, et cetera). - let mut bcx = controlflow::trans_block(bcx, body, dest); - - match dest { - expr::SaveIn(slot) if fcx.needs_ret_allocas => { - Store(bcx, slot, fcx.llretslotptr.get().unwrap()); - } - _ => {} - } - - match fcx.llreturn.get() { - Some(_) => { - Br(bcx, fcx.return_exit_block(), DebugLoc::None); - fcx.pop_custom_cleanup_scope(arg_scope); - } - None => { - // Microoptimization writ large: avoid creating a separate - // llreturn basic block - bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_scope); - } - }; - - // Put return block after all other blocks. - // This somewhat improves single-stepping experience in debugger. - unsafe { - let llreturn = fcx.llreturn.get(); - if let Some(llreturn) = llreturn { - llvm::LLVMMoveBasicBlockAfter(llreturn, bcx.llbb); - } + if fcx.mir.is_none() { + bug!("attempted translation of `{}` w/o MIR", instance); } - // Insert the mandatory first few basic blocks before lltop. 
- fcx.finish(bcx, fn_cleanup_debug_loc.debug_loc()); + mir::trans_mir(&fcx); } pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>) { - let local_instance = inline::maybe_inline_instance(ccx, instance); - - let fn_node_id = ccx.tcx().map.as_local_node_id(local_instance.def).unwrap(); - - let _s = StatRecorder::new(ccx, ccx.tcx().node_path_str(fn_node_id)); + let _s = StatRecorder::new(ccx, ccx.tcx().item_path_str(instance.def)); debug!("trans_instance(instance={:?})", instance); let _icx = push_ctxt("trans_instance"); - let item = ccx.tcx().map.find(fn_node_id).unwrap(); - let fn_ty = ccx.tcx().lookup_item_type(instance.def).ty; let fn_ty = ccx.tcx().erase_regions(&fn_ty); - let fn_ty = monomorphize::apply_param_substs(ccx.tcx(), instance.substs, &fn_ty); + let fn_ty = monomorphize::apply_param_substs(ccx.shared(), instance.substs, &fn_ty); - let sig = ccx.tcx().erase_late_bound_regions(fn_ty.fn_sig()); - let sig = ccx.tcx().normalize_associated_type(&sig); + let sig = ccx.tcx().erase_late_bound_regions_and_normalize(fn_ty.fn_sig()); let abi = fn_ty.fn_abi(); - let lldecl = match ccx.instances().borrow().get(&local_instance) { + let lldecl = match ccx.instances().borrow().get(&instance) { Some(&val) => val, None => bug!("Instance `{:?}` not already declared", instance) }; - match item { - hir_map::NodeItem(&hir::Item { - node: hir::ItemFn(ref decl, _, _, _, _, ref body), .. - }) | - hir_map::NodeTraitItem(&hir::TraitItem { - node: hir::MethodTraitItem( - hir::MethodSig { ref decl, .. }, Some(ref body)), .. - }) | - hir_map::NodeImplItem(&hir::ImplItem { - node: hir::ImplItemKind::Method( - hir::MethodSig { ref decl, .. }, ref body), .. - }) => { - trans_closure(ccx, decl, body, lldecl, instance, - fn_node_id, &sig, abi, closure::ClosureEnv::NotClosure); - } - _ => bug!("Instance is a {:?}?", item) - } -} - -pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - ctor_ty: Ty<'tcx>, - disr: Disr, - args: CallArgs, - dest: expr::Dest, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { - - let ccx = bcx.fcx.ccx; - - let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig()); - let sig = ccx.tcx().normalize_associated_type(&sig); - let result_ty = sig.output; - - // Get location to store the result. If the user does not care about - // the result, just make a stack slot - let llresult = match dest { - expr::SaveIn(d) => d, - expr::Ignore => { - if !type_is_zero_size(ccx, result_ty) { - let llresult = alloc_ty(bcx, result_ty, "constructor_result"); - call_lifetime_start(bcx, llresult); - llresult - } else { - C_undef(type_of::type_of(ccx, result_ty).ptr_to()) - } - } - }; - - if !type_is_zero_size(ccx, result_ty) { - match args { - ArgExprs(exprs) => { - let fields = exprs.iter().map(|x| &**x).enumerate().collect::>(); - bcx = expr::trans_adt(bcx, - result_ty, - disr, - &fields[..], - None, - expr::SaveIn(llresult), - debug_loc); - } - _ => bug!("expected expr as arguments for variant/struct tuple constructor"), - } - } else { - // Just eval all the expressions (if any). Since expressions in Rust can have arbitrary - // contents, there could be side-effects we need from them. 
- match args { - ArgExprs(exprs) => { - for expr in exprs { - bcx = expr::trans_into(bcx, expr, expr::Ignore); - } - } - _ => (), - } - } - - // If the caller doesn't care about the result - // drop the temporary we made - let bcx = match dest { - expr::SaveIn(_) => bcx, - expr::Ignore => { - let bcx = glue::drop_ty(bcx, llresult, result_ty, debug_loc); - if !type_is_zero_size(ccx, result_ty) { - call_lifetime_end(bcx, llresult); - } - bcx - } - }; - - Result::new(bcx, llresult) + trans_closure(ccx, lldecl, instance, &sig, abi); } pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ctor_id: ast::NodeId, + def_id: DefId, + substs: &'tcx Substs<'tcx>, disr: Disr, - param_substs: &'tcx Substs<'tcx>, llfndecl: ValueRef) { - let ctor_ty = ccx.tcx().node_id_to_type(ctor_id); - let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &ctor_ty); + attributes::inline(llfndecl, attributes::InlineAttr::Hint); + attributes::set_frame_pointer_elimination(ccx, llfndecl); - let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig()); - let sig = ccx.tcx().normalize_associated_type(&sig); + let ctor_ty = ccx.tcx().lookup_item_type(def_id).ty; + let ctor_ty = monomorphize::apply_param_substs(ccx.shared(), substs, &ctor_ty); + + let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&ctor_ty.fn_sig()); let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); let (arena, fcx): (TypedArena<_>, FunctionContext); arena = TypedArena::new(); fcx = FunctionContext::new(ccx, llfndecl, fn_ty, None, &arena); - let bcx = fcx.init(false, None); - - assert!(!fcx.needs_ret_allocas); + let bcx = fcx.init(false); if !fcx.fn_ty.ret.is_ignore() { - let dest = fcx.get_ret_slot(bcx, "eret_slot"); + let dest = fcx.llretslotptr.get().unwrap(); let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value - let repr = adt::represent_type(ccx, sig.output); let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize; let mut arg_idx = 0; for (i, arg_ty) in sig.inputs.into_iter().enumerate() { - let lldestptr = adt::trans_field_ptr(bcx, &repr, dest_val, Disr::from(disr), i); + let lldestptr = adt::trans_field_ptr(bcx, sig.output, dest_val, Disr::from(disr), i); let arg = &fcx.fn_ty.args[arg_idx]; arg_idx += 1; let b = &bcx.build(); if common::type_is_fat_ptr(bcx.tcx(), arg_ty) { let meta = &fcx.fn_ty.args[arg_idx]; arg_idx += 1; - arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, lldestptr)); - meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, lldestptr)); + arg.store_fn_arg(b, &mut llarg_idx, get_dataptr(bcx, lldestptr)); + meta.store_fn_arg(b, &mut llarg_idx, get_meta(bcx, lldestptr)); } else { arg.store_fn_arg(b, &mut llarg_idx, lldestptr); } } - adt::trans_set_discr(bcx, &repr, dest, disr); + adt::trans_set_discr(bcx, sig.output, dest, disr); } fcx.finish(bcx, DebugLoc::None); @@ -2096,17 +1198,17 @@ pub fn llvm_linkage_by_name(name: &str) -> Option { // ghost, dllimport, dllexport and linkonce_odr_autohide are not supported // and don't have to be, LLVM treats them as no-ops. 
match name { - "appending" => Some(llvm::AppendingLinkage), - "available_externally" => Some(llvm::AvailableExternallyLinkage), - "common" => Some(llvm::CommonLinkage), - "extern_weak" => Some(llvm::ExternalWeakLinkage), - "external" => Some(llvm::ExternalLinkage), - "internal" => Some(llvm::InternalLinkage), - "linkonce" => Some(llvm::LinkOnceAnyLinkage), - "linkonce_odr" => Some(llvm::LinkOnceODRLinkage), - "private" => Some(llvm::PrivateLinkage), - "weak" => Some(llvm::WeakAnyLinkage), - "weak_odr" => Some(llvm::WeakODRLinkage), + "appending" => Some(llvm::Linkage::AppendingLinkage), + "available_externally" => Some(llvm::Linkage::AvailableExternallyLinkage), + "common" => Some(llvm::Linkage::CommonLinkage), + "extern_weak" => Some(llvm::Linkage::ExternalWeakLinkage), + "external" => Some(llvm::Linkage::ExternalLinkage), + "internal" => Some(llvm::Linkage::InternalLinkage), + "linkonce" => Some(llvm::Linkage::LinkOnceAnyLinkage), + "linkonce_odr" => Some(llvm::Linkage::LinkOnceODRLinkage), + "private" => Some(llvm::Linkage::PrivateLinkage), + "weak" => Some(llvm::Linkage::WeakAnyLinkage), + "weak_odr" => Some(llvm::Linkage::WeakODRLinkage), _ => None, } } @@ -2151,7 +1253,7 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) { return; } - let main_llfn = Callee::def(ccx, main_def_id, instance.substs).reify(ccx).val; + let main_llfn = Callee::def(ccx, main_def_id, instance.substs).reify(ccx); let et = ccx.sess().entry_type.get().unwrap(); match et { @@ -2192,8 +1294,8 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) { Ok(id) => id, Err(s) => ccx.sess().fatal(&s) }; - let empty_substs = ccx.tcx().mk_substs(Substs::empty()); - let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx).val; + let empty_substs = Substs::empty(ccx.tcx()); + let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx); let args = { let opaque_rust_main = llvm::LLVMBuildPointerCast(bld, @@ -2245,8 +1347,7 @@ fn write_metadata(cx: &SharedCrateContext, cx.export_map(), cx.link_meta(), reachable_ids, - cx.mir_map(), - cx.tcx().map.krate()); + cx.mir_map()); let mut compressed = cstore.metadata_encoding_version().to_vec(); compressed.extend_from_slice(&flate::deflate_bytes(&metadata)); @@ -2299,10 +1400,10 @@ fn internalize_symbols<'a, 'tcx>(sess: &Session, // are referenced via a declaration in some other codegen unit. for ccx in ccxs.iter_need_trans() { for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) { - let linkage = llvm::LLVMGetLinkage(val); + let linkage = llvm::LLVMRustGetLinkage(val); // We only care about external declarations (not definitions) // and available_externally definitions. - let is_available_externally = linkage == llvm::AvailableExternallyLinkage as c_uint; + let is_available_externally = linkage == llvm::Linkage::AvailableExternallyLinkage; let is_decl = llvm::LLVMIsDeclaration(val) != 0; if is_decl || is_available_externally { @@ -2320,21 +1421,7 @@ fn internalize_symbols<'a, 'tcx>(sess: &Session, .iter() .cloned() .filter(|trans_item|{ - let def_id = match *trans_item { - TransItem::DropGlue(..) 
=> { - return false - }, - TransItem::Fn(ref instance) => { - instance.def - } - TransItem::Static(node_id) => { - tcx.map.local_def_id(node_id) - } - }; - - trans_item.explicit_linkage(tcx).is_some() || - attr::contains_extern_indicator(tcx.sess.diagnostic(), - &tcx.get_attrs(def_id)) + trans_item.explicit_linkage(tcx).is_some() }) .map(|trans_item| symbol_map.get_or_compute(scx, trans_item)) .collect(); @@ -2344,11 +1431,11 @@ fn internalize_symbols<'a, 'tcx>(sess: &Session, // then give it internal linkage. for ccx in ccxs.iter_need_trans() { for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) { - let linkage = llvm::LLVMGetLinkage(val); + let linkage = llvm::LLVMRustGetLinkage(val); - let is_externally_visible = (linkage == llvm::ExternalLinkage as c_uint) || - (linkage == llvm::LinkOnceODRLinkage as c_uint) || - (linkage == llvm::WeakODRLinkage as c_uint); + let is_externally_visible = (linkage == llvm::Linkage::ExternalLinkage) || + (linkage == llvm::Linkage::LinkOnceODRLinkage) || + (linkage == llvm::Linkage::WeakODRLinkage); let is_definition = llvm::LLVMIsDeclaration(val) == 0; // If this is a definition (as opposed to just a declaration) @@ -2363,7 +1450,7 @@ fn internalize_symbols<'a, 'tcx>(sess: &Session, let has_fixed_linkage = linkage_fixed_explicitly.contains(&name_cow); if !is_referenced_somewhere && !is_reachable && !has_fixed_linkage { - llvm::LLVMSetLinkage(val, llvm::InternalLinkage); + llvm::LLVMRustSetLinkage(val, llvm::Linkage::InternalLinkage); llvm::LLVMSetDLLStorageClass(val, llvm::DLLStorageClass::Default); llvm::UnsetComdat(val); @@ -2393,8 +1480,8 @@ fn create_imps(cx: &CrateContextList) { for ccx in cx.iter_need_trans() { let exported: Vec<_> = iter_globals(ccx.llmod()) .filter(|&val| { - llvm::LLVMGetLinkage(val) == - llvm::ExternalLinkage as c_uint && + llvm::LLVMRustGetLinkage(val) == + llvm::Linkage::ExternalLinkage && llvm::LLVMIsDeclaration(val) == 0 }) .collect(); @@ -2410,7 +1497,7 @@ fn create_imps(cx: &CrateContextList) { imp_name.as_ptr() as *const _); let init = llvm::LLVMConstBitCast(val, i8p_ty.to_ref()); llvm::LLVMSetInitializer(imp, init); - llvm::LLVMSetLinkage(imp, llvm::ExternalLinkage); + llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage); } } } @@ -2489,8 +1576,12 @@ pub fn filter_reachable_ids(tcx: TyCtxt, reachable: NodeSet) -> NodeSet { hir_map::NodeImplItem(&hir::ImplItem { node: hir::ImplItemKind::Method(..), .. }) => { let def_id = tcx.map.local_def_id(id); - let scheme = tcx.lookup_item_type(def_id); - scheme.generics.types.is_empty() + let generics = tcx.lookup_generics(def_id); + let attributes = tcx.get_attrs(def_id); + (generics.parent_types == 0 && generics.types.is_empty()) && + // Functions marked with #[inline] are only ever translated + // with "internal" linkage and are never exported. 
+ !attr::requests_inline(&attributes[..]) } _ => false @@ -2500,7 +1591,8 @@ pub fn filter_reachable_ids(tcx: TyCtxt, reachable: NodeSet) -> NodeSet { pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir_map: &MirMap<'tcx>, - analysis: ty::CrateAnalysis) + analysis: ty::CrateAnalysis, + incremental_hashes_map: &IncrementalHashesMap) -> CrateTranslation { let _task = tcx.dep_graph.in_task(DepNode::TransCrate); @@ -2519,13 +1611,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, tcx.sess.opts.debug_assertions }; - let check_dropflag = if let Some(v) = tcx.sess.opts.debugging_opts.force_dropflag_checks { - v - } else { - tcx.sess.opts.debug_assertions - }; - - let link_meta = link::build_link_meta(tcx, name); + let link_meta = link::build_link_meta(incremental_hashes_map, name); let shared_ccx = SharedCrateContext::new(tcx, &mir_map, @@ -2533,8 +1619,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, Sha256::new(), link_meta.clone(), reachable, - check_overflow, - check_dropflag); + check_overflow); // Translate the metadata. let metadata = time(tcx.sess.time_passes(), "write metadata", || { write_metadata(&shared_ccx, shared_ccx.reachable()) @@ -2648,10 +1733,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, println!("n_null_glues: {}", stats.n_null_glues.get()); println!("n_real_glues: {}", stats.n_real_glues.get()); - println!("n_fallback_instantiations: {}", stats.n_fallback_instantiations.get()); - println!("n_fns: {}", stats.n_fns.get()); - println!("n_monos: {}", stats.n_monos.get()); println!("n_inlines: {}", stats.n_inlines.get()); println!("n_closures: {}", stats.n_closures.get()); println!("fn stats:"); @@ -2801,11 +1883,10 @@ fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a }; let codegen_units = time(time_passes, "codegen unit partitioning", || { - partitioning::partition(scx.tcx(), + partitioning::partition(scx, items.iter().cloned(), strategy, - &inlining_map, - scx.reachable()) + &inlining_map) }); assert!(scx.tcx().sess.opts.cg.codegen_units == codegen_units.len() || @@ -2820,7 +1901,7 @@ fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a } if scx.sess().opts.debugging_opts.print_trans_items.is_some() { - let mut item_to_cgus = HashMap::new(); + let mut item_to_cgus = FnvHashMap(); for cgu in &codegen_units { for (&trans_item, &linkage) in cgu.items() { @@ -2844,17 +1925,17 @@ fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a output.push_str(&cgu_name[..]); let linkage_abbrev = match linkage { - llvm::ExternalLinkage => "External", - llvm::AvailableExternallyLinkage => "Available", - llvm::LinkOnceAnyLinkage => "OnceAny", - llvm::LinkOnceODRLinkage => "OnceODR", - llvm::WeakAnyLinkage => "WeakAny", - llvm::WeakODRLinkage => "WeakODR", - llvm::AppendingLinkage => "Appending", - llvm::InternalLinkage => "Internal", - llvm::PrivateLinkage => "Private", - llvm::ExternalWeakLinkage => "ExternalWeak", - llvm::CommonLinkage => "Common", + llvm::Linkage::ExternalLinkage => "External", + llvm::Linkage::AvailableExternallyLinkage => "Available", + llvm::Linkage::LinkOnceAnyLinkage => "OnceAny", + llvm::Linkage::LinkOnceODRLinkage => "OnceODR", + llvm::Linkage::WeakAnyLinkage => "WeakAny", + llvm::Linkage::WeakODRLinkage => "WeakODR", + llvm::Linkage::AppendingLinkage => "Appending", + llvm::Linkage::InternalLinkage => "Internal", + llvm::Linkage::PrivateLinkage => "Private", + llvm::Linkage::ExternalWeakLinkage => "ExternalWeak", + 
llvm::Linkage::CommonLinkage => "Common", }; output.push_str("["); diff --git a/src/librustc_trans/cabi_mips64.rs b/src/librustc_trans/cabi_mips64.rs new file mode 100644 index 0000000000..e92ef1eaec --- /dev/null +++ b/src/librustc_trans/cabi_mips64.rs @@ -0,0 +1,168 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_upper_case_globals)] + +use libc::c_uint; +use std::cmp; +use llvm; +use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector}; +use abi::{ArgType, FnType}; +use context::CrateContext; +use type_::Type; + +fn align_up_to(off: usize, a: usize) -> usize { + return (off + a - 1) / a * a; +} + +fn align(off: usize, ty: Type) -> usize { + let a = ty_align(ty); + return align_up_to(off, a); +} + +fn ty_align(ty: Type) -> usize { + match ty.kind() { + Integer => ((ty.int_width() as usize) + 7) / 8, + Pointer => 8, + Float => 4, + Double => 8, + Struct => { + if ty.is_packed() { + 1 + } else { + let str_tys = ty.field_types(); + str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t))) + } + } + Array => { + let elt = ty.element_type(); + ty_align(elt) + } + Vector => { + let len = ty.vector_length(); + let elt = ty.element_type(); + ty_align(elt) * len + } + _ => bug!("ty_align: unhandled type") + } +} + +fn ty_size(ty: Type) -> usize { + match ty.kind() { + Integer => ((ty.int_width() as usize) + 7) / 8, + Pointer => 8, + Float => 4, + Double => 8, + Struct => { + if ty.is_packed() { + let str_tys = ty.field_types(); + str_tys.iter().fold(0, |s, t| s + ty_size(*t)) + } else { + let str_tys = ty.field_types(); + let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t)); + align(size, ty) + } + } + Array => { + let len = ty.array_length(); + let elt = ty.element_type(); + let eltsz = ty_size(elt); + len * eltsz + } + Vector => { + let len = ty.vector_length(); + let elt = ty.element_type(); + let eltsz = ty_size(elt); + len * eltsz + } + _ => bug!("ty_size: unhandled type") + } +} + +fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { + if is_reg_ty(ret.ty) { + ret.extend_integer_width_to(64); + } else { + ret.make_indirect(ccx); + } +} + +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) { + let orig_offset = *offset; + let size = ty_size(arg.ty) * 8; + let mut align = ty_align(arg.ty); + + align = cmp::min(cmp::max(align, 4), 8); + *offset = align_up_to(*offset, align); + *offset += align_up_to(size, align * 8) / 8; + + if !is_reg_ty(arg.ty) { + arg.cast = Some(struct_ty(ccx, arg.ty)); + arg.pad = padding_ty(ccx, align, orig_offset); + } else { + arg.extend_integer_width_to(64); + } +} + +fn is_reg_ty(ty: Type) -> bool { + return match ty.kind() { + Integer + | Pointer + | Float + | Double + | Vector => true, + _ => false + }; +} + +fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option { + if ((align - 1 ) & offset) > 0 { + Some(Type::i64(ccx)) + } else { + None + } +} + +fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec { + let int_ty = Type::i64(ccx); + let mut args = Vec::new(); + + let mut n = size / 64; + while n > 0 { + args.push(int_ty); + n -= 1; + } + + let r = size % 64; + if r > 0 { + unsafe { + args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as 
c_uint))); + } + } + + args +} + +fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { + let size = ty_size(ty) * 8; + Type::struct_(ccx, &coerce_to_int(ccx, size), false) +} + +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() { + classify_ret_ty(ccx, &mut fty.ret); + } + + let mut offset = if fty.ret.is_indirect() { 8 } else { 0 }; + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg, &mut offset); + } +} diff --git a/src/librustc_trans/cabi_powerpc.rs b/src/librustc_trans/cabi_powerpc.rs index efbdce67a8..e05c31b1d8 100644 --- a/src/librustc_trans/cabi_powerpc.rs +++ b/src/librustc_trans/cabi_powerpc.rs @@ -28,11 +28,7 @@ fn align(off: usize, ty: Type) -> usize { fn ty_align(ty: Type) -> usize { match ty.kind() { - Integer => { - unsafe { - ((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as usize) + 7) / 8 - } - } + Integer => ((ty.int_width() as usize) + 7) / 8, Pointer => 4, Float => 4, Double => 8, @@ -54,11 +50,7 @@ fn ty_align(ty: Type) -> usize { fn ty_size(ty: Type) -> usize { match ty.kind() { - Integer => { - unsafe { - ((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as usize) + 7) / 8 - } - } + Integer => ((ty.int_width() as usize) + 7) / 8, Pointer => 4, Float => 4, Double => 8, diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs new file mode 100644 index 0000000000..19404b667e --- /dev/null +++ b/src/librustc_trans/cabi_s390x.rs @@ -0,0 +1,150 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// FIXME: The assumes we're using the non-vector ABI, i.e. compiling +// for a pre-z13 machine or using -mno-vx. 
+ +use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector}; +use abi::{FnType, ArgType}; +use context::CrateContext; +use type_::Type; + +use std::cmp; + +fn align_up_to(off: usize, a: usize) -> usize { + return (off + a - 1) / a * a; +} + +fn align(off: usize, ty: Type) -> usize { + let a = ty_align(ty); + return align_up_to(off, a); +} + +fn ty_align(ty: Type) -> usize { + match ty.kind() { + Integer => ((ty.int_width() as usize) + 7) / 8, + Pointer => 8, + Float => 4, + Double => 8, + Struct => { + if ty.is_packed() { + 1 + } else { + let str_tys = ty.field_types(); + str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t))) + } + } + Array => { + let elt = ty.element_type(); + ty_align(elt) + } + Vector => ty_size(ty), + _ => bug!("ty_align: unhandled type") + } +} + +fn ty_size(ty: Type) -> usize { + match ty.kind() { + Integer => ((ty.int_width() as usize) + 7) / 8, + Pointer => 8, + Float => 4, + Double => 8, + Struct => { + if ty.is_packed() { + let str_tys = ty.field_types(); + str_tys.iter().fold(0, |s, t| s + ty_size(*t)) + } else { + let str_tys = ty.field_types(); + let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t)); + align(size, ty) + } + } + Array => { + let len = ty.array_length(); + let elt = ty.element_type(); + let eltsz = ty_size(elt); + len * eltsz + } + Vector => { + let len = ty.vector_length(); + let elt = ty.element_type(); + let eltsz = ty_size(elt); + len * eltsz + } + _ => bug!("ty_size: unhandled type") + } +} + +fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { + if is_reg_ty(ret.ty) { + ret.extend_integer_width_to(64); + } else { + ret.make_indirect(ccx); + } +} + +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { + if arg.ty.kind() == Struct { + fn is_single_fp_element(tys: &[Type]) -> bool { + if tys.len() != 1 { + return false; + } + match tys[0].kind() { + Float | Double => true, + Struct => is_single_fp_element(&tys[0].field_types()), + _ => false + } + } + + if is_single_fp_element(&arg.ty.field_types()) { + match ty_size(arg.ty) { + 4 => arg.cast = Some(Type::f32(ccx)), + 8 => arg.cast = Some(Type::f64(ccx)), + _ => arg.make_indirect(ccx) + } + } else { + match ty_size(arg.ty) { + 1 => arg.cast = Some(Type::i8(ccx)), + 2 => arg.cast = Some(Type::i16(ccx)), + 4 => arg.cast = Some(Type::i32(ccx)), + 8 => arg.cast = Some(Type::i64(ccx)), + _ => arg.make_indirect(ccx) + } + } + return; + } + + if is_reg_ty(arg.ty) { + arg.extend_integer_width_to(64); + } else { + arg.make_indirect(ccx); + } +} + +fn is_reg_ty(ty: Type) -> bool { + match ty.kind() { + Integer + | Pointer + | Float + | Double => ty_size(ty) <= 8, + _ => false + } +} + +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() { + classify_ret_ty(ccx, &mut fty.ret); + } + + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg); + } +} diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index 805c7d345a..eb67f4ca61 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -182,7 +182,7 @@ fn classify_ty(ty: Type) -> Vec { (SSEDs, SSEUp) | (SSEInt(_), SSEUp) => return, - (_, _) => newv + (..) => newv }; cls[i] = to_write; } diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index aaec2a4702..05e22896c4 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -15,48 +15,31 @@ //! closure. 
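[Editor's note] The s390x `classify_arg_ty` above maps small non-float aggregates to an integer of the same size and makes anything larger indirect (the single-float special case is omitted here). A standalone sketch of that size-based decision; `PassAs` and the function name are hypothetical stand-ins, not from the patch:

```rust
// Illustrative sketch, not part of the patch: the size-based classification
// cabi_s390x.rs applies to non-FP aggregates in classify_arg_ty.
#[derive(Debug, PartialEq)]
enum PassAs { I8, I16, I32, I64, Indirect }

fn classify_aggregate(size_in_bytes: usize) -> PassAs {
    match size_in_bytes {
        1 => PassAs::I8,
        2 => PassAs::I16,
        4 => PassAs::I32,
        8 => PassAs::I64,
        _ => PassAs::Indirect, // anything else is passed by reference
    }
}

fn main() {
    assert_eq!(classify_aggregate(4), PassAs::I32);
    assert_eq!(classify_aggregate(3), PassAs::Indirect);
}
```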
pub use self::CalleeData::*; -pub use self::CallArgs::*; use arena::TypedArena; -use back::symbol_names; use llvm::{self, ValueRef, get_params}; -use middle::cstore::LOCAL_CRATE; use rustc::hir::def_id::DefId; -use rustc::ty::subst; +use rustc::ty::subst::Substs; use rustc::traits; -use rustc::hir::map as hir_map; use abi::{Abi, FnType}; -use adt; use attributes; use base; use base::*; use build::*; -use cleanup; -use cleanup::CleanupMethods; use closure; -use common::{self, Block, Result, CrateContext, FunctionContext, C_undef}; +use common::{self, Block, Result, CrateContext, FunctionContext, SharedCrateContext}; use consts; -use datum::*; use debuginfo::DebugLoc; use declare; -use expr; -use glue; -use inline; -use intrinsic; -use machine::llalign_of_min; use meth; use monomorphize::{self, Instance}; use trans_item::TransItem; -use type_::Type; use type_of; -use value::Value; use Disr; -use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::{self, Ty, TypeFoldable}; use rustc::hir; use syntax_pos::DUMMY_SP; -use errors; -use syntax::ptr::P; #[derive(Debug)] pub enum CalleeData { @@ -80,10 +63,10 @@ pub struct Callee<'tcx> { impl<'tcx> Callee<'tcx> { /// Function pointer. - pub fn ptr(datum: Datum<'tcx, Rvalue>) -> Callee<'tcx> { + pub fn ptr(llfn: ValueRef, ty: Ty<'tcx>) -> Callee<'tcx> { Callee { - data: Fn(datum.val), - ty: datum.ty + data: Fn(llfn), + ty: ty } } @@ -105,103 +88,88 @@ impl<'tcx> Callee<'tcx> { /// Function or method definition. pub fn def<'a>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId, - substs: &'tcx subst::Substs<'tcx>) + substs: &'tcx Substs<'tcx>) -> Callee<'tcx> { let tcx = ccx.tcx(); - if substs.self_ty().is_some() { - // Only trait methods can have a Self parameter. - return Callee::trait_method(ccx, def_id, substs); + if let Some(trait_id) = tcx.trait_of_item(def_id) { + return Callee::trait_method(ccx, trait_id, def_id, substs); } - let maybe_node_id = inline::get_local_instance(ccx, def_id) - .and_then(|def_id| tcx.map.as_local_node_id(def_id)); - let maybe_ast_node = maybe_node_id.and_then(|node_id| { - tcx.map.find(node_id) - }); - - let data = match maybe_ast_node { - Some(hir_map::NodeStructCtor(_)) => { - NamedTupleConstructor(Disr(0)) - } - Some(hir_map::NodeVariant(_)) => { - let vinfo = common::inlined_variant_def(ccx, maybe_node_id.unwrap()); - NamedTupleConstructor(Disr::from(vinfo.disr_val)) + let fn_ty = def_ty(ccx.shared(), def_id, substs); + if let ty::TyFnDef(.., f) = fn_ty.sty { + if f.abi == Abi::RustIntrinsic || f.abi == Abi::PlatformIntrinsic { + return Callee { + data: Intrinsic, + ty: fn_ty + }; } - Some(hir_map::NodeForeignItem(fi)) if { - let abi = tcx.map.get_foreign_abi(fi.id); - abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic - } => Intrinsic, - - _ => return Callee::ptr(get_fn(ccx, def_id, substs)) - }; + } - Callee { - data: data, - ty: def_ty(tcx, def_id, substs) + // FIXME(eddyb) Detect ADT constructors more efficiently. + if let Some(adt_def) = fn_ty.fn_ret().skip_binder().ty_adt_def() { + if let Some(v) = adt_def.variants.iter().find(|v| def_id == v.did) { + return Callee { + data: NamedTupleConstructor(Disr::from(v.disr_val)), + ty: fn_ty + }; + } } + + let (llfn, ty) = get_fn(ccx, def_id, substs); + Callee::ptr(llfn, ty) } /// Trait method, which has to be resolved to an impl method. 
pub fn trait_method<'a>(ccx: &CrateContext<'a, 'tcx>, + trait_id: DefId, def_id: DefId, - substs: &'tcx subst::Substs<'tcx>) + substs: &'tcx Substs<'tcx>) -> Callee<'tcx> { let tcx = ccx.tcx(); - let method_item = tcx.impl_or_trait_item(def_id); - let trait_id = method_item.container().id(); - let trait_ref = ty::Binder(substs.to_trait_ref(tcx, trait_id)); - let trait_ref = tcx.normalize_associated_type(&trait_ref); + let trait_ref = ty::TraitRef::from_method(tcx, trait_id, substs); + let trait_ref = tcx.normalize_associated_type(&ty::Binder(trait_ref)); match common::fulfill_obligation(ccx.shared(), DUMMY_SP, trait_ref) { traits::VtableImpl(vtable_impl) => { - let impl_did = vtable_impl.impl_def_id; - let mname = tcx.item_name(def_id); - // create a concatenated set of substitutions which includes - // those from the impl and those from the method: - let impl_substs = vtable_impl.substs.with_method_from(&substs); - let substs = tcx.mk_substs(impl_substs); - let mth = meth::get_impl_method(tcx, impl_did, substs, mname); + let name = tcx.item_name(def_id); + let (def_id, substs) = traits::find_method(tcx, name, substs, &vtable_impl); // Translate the function, bypassing Callee::def. // That is because default methods have the same ID as the // trait method used to look up the impl method that ended // up here, so calling Callee::def would infinitely recurse. - Callee::ptr(get_fn(ccx, mth.method.def_id, mth.substs)) + let (llfn, ty) = get_fn(ccx, def_id, substs); + Callee::ptr(llfn, ty) } traits::VtableClosure(vtable_closure) => { // The substitutions should have no type parameters remaining // after passing through fulfill_obligation let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap(); + let instance = Instance::new(def_id, substs); let llfn = closure::trans_closure_method(ccx, vtable_closure.closure_def_id, vtable_closure.substs, + instance, trait_closure_kind); - let method_ty = def_ty(tcx, def_id, substs); - let fn_ptr_ty = match method_ty.sty { - ty::TyFnDef(_, _, fty) => tcx.mk_fn_ptr(fty), - _ => bug!("expected fn item type, found {}", - method_ty) - }; - Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty)) + let method_ty = def_ty(ccx.shared(), def_id, substs); + Callee::ptr(llfn, method_ty) } traits::VtableFnPointer(vtable_fn_pointer) => { let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap(); - let llfn = trans_fn_pointer_shim(ccx, trait_closure_kind, vtable_fn_pointer.fn_ty); + let instance = Instance::new(def_id, substs); + let llfn = trans_fn_pointer_shim(ccx, instance, + trait_closure_kind, + vtable_fn_pointer.fn_ty); - let method_ty = def_ty(tcx, def_id, substs); - let fn_ptr_ty = match method_ty.sty { - ty::TyFnDef(_, _, fty) => tcx.mk_fn_ptr(fty), - _ => bug!("expected fn item type, found {}", - method_ty) - }; - Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty)) + let method_ty = def_ty(ccx.shared(), def_id, substs); + Callee::ptr(llfn, method_ty) } traits::VtableObject(ref data) => { Callee { data: Virtual(tcx.get_vtable_index_of_object_method(data, def_id)), - ty: def_ty(tcx, def_id, substs) + ty: def_ty(ccx.shared(), def_id, substs) } } vtable => { @@ -216,8 +184,7 @@ impl<'tcx> Callee<'tcx> { pub fn direct_fn_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>, extra_args: &[Ty<'tcx>]) -> FnType { let abi = self.ty.fn_abi(); - let sig = ccx.tcx().erase_late_bound_regions(self.ty.fn_sig()); - let sig = ccx.tcx().normalize_associated_type(&sig); + let sig = ccx.tcx().erase_late_bound_regions_and_normalize(self.ty.fn_sig()); let mut fn_ty = 
FnType::unadjusted(ccx, abi, &sig, extra_args); if let Virtual(_) = self.data { // Don't pass the vtable, it's not an argument of the virtual fn. @@ -240,30 +207,32 @@ impl<'tcx> Callee<'tcx> { /// function. pub fn call<'a, 'blk>(self, bcx: Block<'blk, 'tcx>, debug_loc: DebugLoc, - args: CallArgs<'a, 'tcx>, - dest: Option) + args: &[ValueRef], + dest: Option) -> Result<'blk, 'tcx> { trans_call_inner(bcx, debug_loc, self, args, dest) } /// Turn the callee into a function pointer. - pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>) - -> Datum<'tcx, Rvalue> { - let fn_ptr_ty = match self.ty.sty { - ty::TyFnDef(_, _, f) => ccx.tcx().mk_fn_ptr(f), - _ => self.ty - }; + pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { match self.data { - Fn(llfn) => { - immediate_rvalue(llfn, fn_ptr_ty) - } - Virtual(idx) => { - let llfn = meth::trans_object_shim(ccx, self.ty, idx); - immediate_rvalue(llfn, fn_ptr_ty) - } - NamedTupleConstructor(_) => match self.ty.sty { + Fn(llfn) => llfn, + Virtual(_) => meth::trans_object_shim(ccx, self), + NamedTupleConstructor(disr) => match self.ty.sty { ty::TyFnDef(def_id, substs, _) => { - return get_fn(ccx, def_id, substs); + let instance = Instance::new(def_id, substs); + if let Some(&llfn) = ccx.instances().borrow().get(&instance) { + return llfn; + } + + let sym = ccx.symbol_map().get_or_compute(ccx.shared(), + TransItem::Fn(instance)); + assert!(!ccx.codegen_unit().contains_item(&TransItem::Fn(instance))); + let lldecl = declare::define_internal_fn(ccx, &sym, self.ty); + base::trans_ctor_shim(ccx, def_id, substs, disr, lldecl); + ccx.instances().borrow_mut().insert(instance, lldecl); + + lldecl } _ => bug!("expected fn item type, found {}", self.ty) }, @@ -273,12 +242,12 @@ impl<'tcx> Callee<'tcx> { } /// Given a DefId and some Substs, produces the monomorphic item type. -fn def_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, +fn def_ty<'a, 'tcx>(shared: &SharedCrateContext<'a, 'tcx>, def_id: DefId, - substs: &'tcx subst::Substs<'tcx>) + substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { - let ty = tcx.lookup_item_type(def_id).ty; - monomorphize::apply_param_substs(tcx, substs, &ty) + let ty = shared.tcx().lookup_item_type(def_id).ty; + monomorphize::apply_param_substs(shared, substs, &ty) } /// Translates an adapter that implements the `Fn` trait for a fn @@ -293,8 +262,9 @@ fn def_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, /// ``` /// /// but for the bare function type given. -pub fn trans_fn_pointer_shim<'a, 'tcx>( +fn trans_fn_pointer_shim<'a, 'tcx>( ccx: &'a CrateContext<'a, 'tcx>, + method_instance: Instance<'tcx>, closure_kind: ty::ClosureKind, bare_fn_ty: Ty<'tcx>) -> ValueRef @@ -314,7 +284,7 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( let llfnpointer = match bare_fn_ty.sty { ty::TyFnDef(def_id, substs, _) => { // Function definitions have to be turned into a pointer. - let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val; + let llfn = Callee::def(ccx, def_id, substs).reify(ccx); if !is_by_ref { // A by-value fn item is ignored, so the shim has // the same signature as the original function. @@ -343,7 +313,7 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( // Construct the "tuply" version of `bare_fn_ty`. It takes two arguments: `self`, // which is the fn pointer, and `args`, which is the arguments tuple. 
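[Editor's note] At the source level, the fn-pointer shim described above simply forwards a tupled argument list to the underlying function. A rough sketch of that adaptation in ordinary Rust (not the generated LLVM; names are illustrative):

```rust
// Illustrative sketch, not compiler output: what the fn-pointer shim does
// conceptually -- accept the arguments as one tuple (the rust-call form)
// and forward them to the bare function.
fn bare(x: i32, y: i32) -> i32 {
    x + y
}

fn shim(f: fn(i32, i32) -> i32, args: (i32, i32)) -> i32 {
    f(args.0, args.1)
}

fn main() {
    assert_eq!(shim(bare, (2, 3)), 5);
}
```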
let sig = match bare_fn_ty.sty { - ty::TyFnDef(_, _, + ty::TyFnDef(.., &ty::BareFnTy { unsafety: hir::Unsafety::Normal, abi: Abi::Rust, ref sig }) | @@ -356,8 +326,7 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( bare_fn_ty); } }; - let sig = tcx.erase_late_bound_regions(sig); - let sig = ccx.tcx().normalize_associated_type(&sig); + let sig = tcx.erase_late_bound_regions_and_normalize(sig); let tuple_input_ty = tcx.mk_tup(sig.inputs.to_vec()); let sig = ty::FnSig { inputs: vec![bare_fn_ty_maybe_ref, @@ -374,17 +343,14 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( debug!("tuple_fn_ty: {:?}", tuple_fn_ty); // - let function_name = - symbol_names::internal_name_from_type_and_suffix(ccx, - bare_fn_ty, - "fn_pointer_shim"); + let function_name = method_instance.symbol_name(ccx.shared()); let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty); attributes::set_frame_pointer_elimination(ccx, llfn); // let (block_arena, fcx): (TypedArena<_>, FunctionContext); block_arena = TypedArena::new(); fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena); - let mut bcx = fcx.init(false, None); + let mut bcx = fcx.init(false); let llargs = get_params(fcx.llfn); @@ -398,17 +364,13 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( } }); - assert!(!fcx.needs_ret_allocas); - - let dest = fcx.llretslotptr.get().map(|_| - expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")) - ); + let dest = fcx.llretslotptr.get(); let callee = Callee { data: Fn(llfnpointer), ty: bare_fn_ty }; - bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[(self_idx + 1)..]), dest).bcx; + bcx = callee.call(bcx, DebugLoc::None, &llargs[(self_idx + 1)..], dest).bcx; fcx.finish(bcx, DebugLoc::None); @@ -427,91 +389,28 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>( /// - `substs`: values for each of the fn/method's parameters fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId, - substs: &'tcx subst::Substs<'tcx>) - -> Datum<'tcx, Rvalue> { + substs: &'tcx Substs<'tcx>) + -> (ValueRef, Ty<'tcx>) { let tcx = ccx.tcx(); debug!("get_fn(def_id={:?}, substs={:?})", def_id, substs); - assert!(!substs.types.needs_infer()); - assert!(!substs.types.has_escaping_regions()); - - // Check whether this fn has an inlined copy and, if so, redirect - // def_id to the local id of the inlined copy. - let def_id = inline::maybe_instantiate_inline(ccx, def_id); - - fn is_named_tuple_constructor(tcx: TyCtxt, def_id: DefId) -> bool { - let node_id = match tcx.map.as_local_node_id(def_id) { - Some(n) => n, - None => { return false; } - }; - let map_node = errors::expect( - &tcx.sess.diagnostic(), - tcx.map.find(node_id), - || "local item should be in ast map".to_string()); - - match map_node { - hir_map::NodeVariant(v) => { - v.node.data.is_tuple() - } - hir_map::NodeStructCtor(_) => true, - _ => false - } - } - let must_monomorphise = - !substs.types.is_empty() || is_named_tuple_constructor(tcx, def_id); - - debug!("get_fn({:?}) must_monomorphise: {}", - def_id, must_monomorphise); - - // Create a monomorphic version of generic functions - if must_monomorphise { - // Should be either intra-crate or inlined. - assert_eq!(def_id.krate, LOCAL_CRATE); - - let substs = tcx.normalize_associated_type(&substs); - let (val, fn_ty) = monomorphize::monomorphic_fn(ccx, def_id, substs); - let fn_ptr_ty = match fn_ty.sty { - ty::TyFnDef(_, _, fty) => { - // Create a fn pointer with the substituted signature. 
- tcx.mk_fn_ptr(fty) - } - _ => bug!("expected fn item type, found {}", fn_ty) - }; - assert_eq!(type_of::type_of(ccx, fn_ptr_ty), common::val_ty(val)); - return immediate_rvalue(val, fn_ptr_ty); - } + assert!(!substs.needs_infer()); + assert!(!substs.has_escaping_regions()); + assert!(!substs.has_param_types()); - // Find the actual function pointer. - let ty = ccx.tcx().lookup_item_type(def_id).ty; - let fn_ptr_ty = match ty.sty { - ty::TyFnDef(_, _, ref fty) => { - // Create a fn pointer with the normalized signature. - tcx.mk_fn_ptr(tcx.normalize_associated_type(fty)) - } - _ => bug!("expected fn item type, found {}", ty) - }; + let substs = tcx.normalize_associated_type(&substs); + let instance = Instance::new(def_id, substs); + let item_ty = ccx.tcx().lookup_item_type(def_id).ty; + let fn_ty = monomorphize::apply_param_substs(ccx.shared(), substs, &item_ty); - let instance = Instance::mono(ccx.shared(), def_id); if let Some(&llfn) = ccx.instances().borrow().get(&instance) { - return immediate_rvalue(llfn, fn_ptr_ty); + return (llfn, fn_ty); } - let local_id = ccx.tcx().map.as_local_node_id(def_id); - let local_item = match local_id.and_then(|id| tcx.map.find(id)) { - Some(hir_map::NodeItem(&hir::Item { - span, node: hir::ItemFn(..), .. - })) | - Some(hir_map::NodeTraitItem(&hir::TraitItem { - span, node: hir::MethodTraitItem(_, Some(_)), .. - })) | - Some(hir_map::NodeImplItem(&hir::ImplItem { - span, node: hir::ImplItemKind::Method(..), .. - })) => { - Some(span) - } - _ => None - }; + let sym = ccx.symbol_map().get_or_compute(ccx.shared(), + TransItem::Fn(instance)); + debug!("get_fn({:?}: {:?}) => {}", instance, fn_ty, sym); // This is subtle and surprising, but sometimes we have to bitcast // the resulting fn pointer. The reason has to do with external @@ -537,23 +436,17 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // reference. It also occurs when testing libcore and in some // other weird situations. Annoying. - let sym = ccx.symbol_map().get_or_compute(ccx.shared(), - TransItem::Fn(instance)); - - let llptrty = type_of::type_of(ccx, fn_ptr_ty); - let llfn = if let Some(llfn) = declare::get_declared_value(ccx, &sym) { - if let Some(span) = local_item { - if declare::get_defined_value(ccx, &sym).is_some() { - ccx.sess().span_fatal(span, - &format!("symbol `{}` is already defined", &sym)); - } + let fn_ptr_ty = match fn_ty.sty { + ty::TyFnDef(.., fty) => { + // Create a fn pointer with the substituted signature. + tcx.mk_fn_ptr(fty) } + _ => bug!("expected fn item type, found {}", fn_ty) + }; + let llptrty = type_of::type_of(ccx, fn_ptr_ty); + let llfn = if let Some(llfn) = declare::get_declared_value(ccx, &sym) { if common::val_ty(llfn) != llptrty { - if local_item.is_some() { - bug!("symbol `{}` previously declared as {:?}, now wanted as {:?}", - sym, Value(llfn), llptrty); - } debug!("get_fn: casting {:?} to {:?}", llfn, llptrty); consts::ptrcast(llfn, llptrty) } else { @@ -561,15 +454,21 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, llfn } } else { - let llfn = declare::declare_fn(ccx, &sym, ty); + let llfn = declare::declare_fn(ccx, &sym, fn_ty); assert_eq!(common::val_ty(llfn), llptrty); debug!("get_fn: not casting pointer!"); let attrs = ccx.tcx().get_attrs(def_id); attributes::from_fn_attrs(ccx, &attrs, llfn); - if local_item.is_some() { + + let is_local_def = ccx.shared().translation_items().borrow() + .contains(&TransItem::Fn(instance)); + if is_local_def { // FIXME(eddyb) Doubt all extern fn should allow unwinding. 
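[Editor's note] The rewritten `get_fn` follows a declare-or-reuse pattern: consult the instance cache, otherwise declare the symbol and record it. A tiny sketch of that memoization shape; the `HashMap` is a hypothetical stand-in for `ccx.instances()` and the names are illustrative:

```rust
// Illustrative sketch, not compiler code: the declare-or-reuse pattern
// used by get_fn above.
use std::collections::HashMap;

fn get_or_declare(
    cache: &mut HashMap<String, u64>,
    sym: &str,
    declare: impl FnOnce() -> u64,
) -> u64 {
    if let Some(&llfn) = cache.get(sym) {
        return llfn; // already declared in this codegen unit
    }
    let llfn = declare();
    cache.insert(sym.to_string(), llfn);
    llfn
}

fn main() {
    let mut cache = HashMap::new();
    let first = get_or_declare(&mut cache, "foo", || 1);
    let second = get_or_declare(&mut cache, "foo", || 2); // cache hit: closure not run
    assert_eq!(first, second);
}
```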
attributes::unwind(llfn, true); + unsafe { + llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage); + } } llfn @@ -577,17 +476,17 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ccx.instances().borrow_mut().insert(instance, llfn); - immediate_rvalue(llfn, fn_ptr_ty) + (llfn, fn_ty) } // ______________________________________________________________________ // Translating calls -fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, +fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, debug_loc: DebugLoc, callee: Callee<'tcx>, - args: CallArgs<'a, 'tcx>, - dest: Option) + args: &[ValueRef], + opt_llretslot: Option) -> Result<'blk, 'tcx> { // Introduce a temporary cleanup scope that will contain cleanups // for the arguments while they are being evaluated. The purpose @@ -599,65 +498,16 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let fcx = bcx.fcx; let ccx = fcx.ccx; - let abi = callee.ty.fn_abi(); - let sig = callee.ty.fn_sig(); - let output = bcx.tcx().erase_late_bound_regions(&sig.output()); - let output = bcx.tcx().normalize_associated_type(&output); - - let extra_args = match args { - ArgExprs(args) if abi != Abi::RustCall => { - args[sig.0.inputs.len()..].iter().map(|expr| { - common::expr_ty_adjusted(bcx, expr) - }).collect() - } - _ => vec![] - }; - let fn_ty = callee.direct_fn_type(ccx, &extra_args); + let fn_ret = callee.ty.fn_ret(); + let fn_ty = callee.direct_fn_type(ccx, &[]); let mut callee = match callee.data { - Intrinsic => { - assert!(abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic); - assert!(dest.is_some()); - - return intrinsic::trans_intrinsic_call(bcx, callee.ty, &fn_ty, - args, dest.unwrap(), - debug_loc); - } - NamedTupleConstructor(disr) => { - assert!(dest.is_some()); - - return base::trans_named_tuple_constructor(bcx, - callee.ty, - disr, - args, - dest.unwrap(), - debug_loc); + NamedTupleConstructor(_) | Intrinsic => { + bug!("{:?} calls should not go through Callee::call", callee); } f => f }; - // Generate a location to store the result. If the user does - // not care about the result, just make a stack slot. - let opt_llretslot = dest.and_then(|dest| match dest { - expr::SaveIn(dst) => Some(dst), - expr::Ignore => { - let needs_drop = || bcx.fcx.type_needs_drop(output); - if fn_ty.ret.is_indirect() || fn_ty.ret.cast.is_some() || needs_drop() { - // Push the out-pointer if we use an out-pointer for this - // return type, otherwise push "undef". - if fn_ty.ret.is_ignore() { - Some(C_undef(fn_ty.ret.original_ty.ptr_to())) - } else { - let llresult = alloca(bcx, fn_ty.ret.original_ty, "__llret"); - call_lifetime_start(bcx, llresult); - Some(llresult) - } - } else { - None - } - } - }); - // If there no destination, return must be direct, with no cast. 
if opt_llretslot.is_none() { assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none()); @@ -673,17 +523,24 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, llargs.push(llretslot); } - let arg_cleanup_scope = fcx.push_custom_cleanup_scope(); - bcx = trans_args(bcx, abi, &fn_ty, &mut callee, args, &mut llargs, - cleanup::CustomScope(arg_cleanup_scope)); - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); + match callee { + Virtual(idx) => { + llargs.push(args[0]); + + let fn_ptr = meth::get_virtual_method(bcx, args[1], idx); + let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); + callee = Fn(PointerCast(bcx, fn_ptr, llty)); + llargs.extend_from_slice(&args[2..]); + } + _ => llargs.extend_from_slice(args) + } let llfn = match callee { Fn(f) => f, _ => bug!("expected fn pointer callee, found {:?}", callee) }; - let (llret, mut bcx) = base::invoke(bcx, llfn, &llargs, debug_loc); + let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc); if !bcx.unreachable.get() { fn_ty.apply_attrs_callsite(llret); @@ -699,283 +556,9 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } } - fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_cleanup_scope); - - // If the caller doesn't care about the result of this fn call, - // drop the temporary slot we made. - match (dest, opt_llretslot) { - (Some(expr::Ignore), Some(llretslot)) => { - // drop the value if it is not being saved. - bcx = glue::drop_ty(bcx, llretslot, output, debug_loc); - call_lifetime_end(bcx, llretslot); - } - _ => {} - } - - // FIXME(canndrew): This is_never should really be an is_uninhabited - if output.is_never() { + if fn_ret.0.is_never() { Unreachable(bcx); } Result::new(bcx, llret) } - -pub enum CallArgs<'a, 'tcx> { - /// Supply value of arguments as a list of expressions that must be - /// translated. This is used in the common case of `foo(bar, qux)`. - ArgExprs(&'a [P]), - - /// Supply value of arguments as a list of LLVM value refs; frequently - /// used with lang items and so forth, when the argument is an internal - /// value. - ArgVals(&'a [ValueRef]), - - /// For overloaded operators: `(lhs, Option(rhs))`. - /// `lhs` is the left-hand-side and `rhs` is the datum - /// of the right-hand-side argument (if any). - ArgOverloadedOp(Datum<'tcx, Expr>, Option>), - - /// Supply value of arguments as a list of expressions that must be - /// translated, for overloaded call operators. - ArgOverloadedCall(Vec<&'a hir::Expr>), -} - -fn trans_args_under_call_abi<'blk, 'tcx>( - mut bcx: Block<'blk, 'tcx>, - arg_exprs: &[P], - callee: &mut CalleeData, - fn_ty: &FnType, - llargs: &mut Vec, - arg_cleanup_scope: cleanup::ScopeId) - -> Block<'blk, 'tcx> -{ - let mut arg_idx = 0; - - // Translate the `self` argument first. - let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0])); - bcx = trans_arg_datum(bcx, - arg_datum, - callee, fn_ty, &mut arg_idx, - arg_cleanup_scope, - llargs); - - // Now untuple the rest of the arguments. 
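[Editor's note] The `Virtual` arm shown above splits a trait-object call into the data pointer plus a function pointer loaded from the vtable at a fixed index. A conceptual source-level sketch of that dispatch; the layout types here are hypothetical stand-ins for the real object representation:

```rust
// Illustrative sketch, not compiler code: dispatch through a hand-rolled
// vtable, mirroring what trans_call_inner's Virtual arm emits.
struct Vtable {
    methods: [fn(*const ()) -> i32; 1],
}

fn read_i32(data: *const ()) -> i32 {
    unsafe { *(data as *const i32) }
}

fn dispatch(data: *const (), vtable: &Vtable, idx: usize) -> i32 {
    // Load the method pointer from the vtable slot, then call it with the
    // data pointer as the first argument.
    (vtable.methods[idx])(data)
}

fn main() {
    let value = 42i32;
    let vtable = Vtable { methods: [read_i32] };
    assert_eq!(dispatch(&value as *const i32 as *const (), &vtable, 0), 42);
}
```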
- let tuple_expr = &arg_exprs[1]; - let tuple_type = common::node_id_type(bcx, tuple_expr.id); - - match tuple_type.sty { - ty::TyTuple(ref field_types) => { - let tuple_datum = unpack_datum!(bcx, - expr::trans(bcx, &tuple_expr)); - let tuple_lvalue_datum = - unpack_datum!(bcx, - tuple_datum.to_lvalue_datum(bcx, - "args", - tuple_expr.id)); - let repr = adt::represent_type(bcx.ccx(), tuple_type); - let repr_ptr = &repr; - for (i, field_type) in field_types.iter().enumerate() { - let arg_datum = tuple_lvalue_datum.get_element( - bcx, - field_type, - |srcval| { - adt::trans_field_ptr(bcx, repr_ptr, srcval, Disr(0), i) - }).to_expr_datum(); - bcx = trans_arg_datum(bcx, - arg_datum, - callee, fn_ty, &mut arg_idx, - arg_cleanup_scope, - llargs); - } - } - _ => { - span_bug!(tuple_expr.span, - "argument to `.call()` wasn't a tuple?!") - } - }; - - bcx -} - -pub fn trans_args<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - abi: Abi, - fn_ty: &FnType, - callee: &mut CalleeData, - args: CallArgs<'a, 'tcx>, - llargs: &mut Vec, - arg_cleanup_scope: cleanup::ScopeId) - -> Block<'blk, 'tcx> { - debug!("trans_args(abi={})", abi); - - let _icx = push_ctxt("trans_args"); - - let mut bcx = bcx; - let mut arg_idx = 0; - - // First we figure out the caller's view of the types of the arguments. - // This will be needed if this is a generic call, because the callee has - // to cast her view of the arguments to the caller's view. - match args { - ArgExprs(arg_exprs) => { - if abi == Abi::RustCall { - // This is only used for direct calls to the `call`, - // `call_mut` or `call_once` functions. - return trans_args_under_call_abi(bcx, - arg_exprs, callee, fn_ty, - llargs, - arg_cleanup_scope) - } - - for arg_expr in arg_exprs { - let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_expr)); - bcx = trans_arg_datum(bcx, - arg_datum, - callee, fn_ty, &mut arg_idx, - arg_cleanup_scope, - llargs); - } - } - ArgOverloadedCall(arg_exprs) => { - for expr in arg_exprs { - let arg_datum = - unpack_datum!(bcx, expr::trans(bcx, expr)); - bcx = trans_arg_datum(bcx, - arg_datum, - callee, fn_ty, &mut arg_idx, - arg_cleanup_scope, - llargs); - } - } - ArgOverloadedOp(lhs, rhs) => { - bcx = trans_arg_datum(bcx, lhs, - callee, fn_ty, &mut arg_idx, - arg_cleanup_scope, - llargs); - - if let Some(rhs) = rhs { - bcx = trans_arg_datum(bcx, rhs, - callee, fn_ty, &mut arg_idx, - arg_cleanup_scope, - llargs); - } - } - ArgVals(vs) => { - match *callee { - Virtual(idx) => { - llargs.push(vs[0]); - - let fn_ptr = meth::get_virtual_method(bcx, vs[1], idx); - let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); - *callee = Fn(PointerCast(bcx, fn_ptr, llty)); - llargs.extend_from_slice(&vs[2..]); - } - _ => llargs.extend_from_slice(vs) - } - } - } - - bcx -} - -fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - arg_datum: Datum<'tcx, Expr>, - callee: &mut CalleeData, - fn_ty: &FnType, - next_idx: &mut usize, - arg_cleanup_scope: cleanup::ScopeId, - llargs: &mut Vec) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_arg_datum"); - let mut bcx = bcx; - - debug!("trans_arg_datum({:?})", arg_datum); - - let arg = &fn_ty.args[*next_idx]; - *next_idx += 1; - - // Fill padding with undef value, where applicable. - if let Some(ty) = arg.pad { - llargs.push(C_undef(ty)); - } - - // Determine whether we want a by-ref datum even if not appropriate. 
- let want_by_ref = arg.is_indirect() || arg.cast.is_some(); - - let fat_ptr = common::type_is_fat_ptr(bcx.tcx(), arg_datum.ty); - let (by_ref, val) = if fat_ptr && !bcx.fcx.type_needs_drop(arg_datum.ty) { - (true, arg_datum.val) - } else { - // Make this an rvalue, since we are going to be - // passing ownership. - let arg_datum = unpack_datum!( - bcx, arg_datum.to_rvalue_datum(bcx, "arg")); - - // Now that arg_datum is owned, get it into the appropriate - // mode (ref vs value). - let arg_datum = unpack_datum!(bcx, if want_by_ref { - arg_datum.to_ref_datum(bcx) - } else { - arg_datum.to_appropriate_datum(bcx) - }); - - // Technically, ownership of val passes to the callee. - // However, we must cleanup should we panic before the - // callee is actually invoked. - (arg_datum.kind.is_by_ref(), - arg_datum.add_clean(bcx.fcx, arg_cleanup_scope)) - }; - - if arg.is_ignore() { - return bcx; - } - - debug!("--- trans_arg_datum passing {:?}", Value(val)); - - if fat_ptr { - // Fat pointers should be passed without any transformations. - assert!(!arg.is_indirect() && arg.cast.is_none()); - llargs.push(Load(bcx, expr::get_dataptr(bcx, val))); - - let info_arg = &fn_ty.args[*next_idx]; - *next_idx += 1; - assert!(!info_arg.is_indirect() && info_arg.cast.is_none()); - let info = Load(bcx, expr::get_meta(bcx, val)); - - if let Virtual(idx) = *callee { - // We have to grab the fn pointer from the vtable when - // handling the first argument, ensure that here. - assert_eq!(*next_idx, 2); - assert!(info_arg.is_ignore()); - let fn_ptr = meth::get_virtual_method(bcx, info, idx); - let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); - *callee = Fn(PointerCast(bcx, fn_ptr, llty)); - } else { - assert!(!info_arg.is_ignore()); - llargs.push(info); - } - return bcx; - } - - let mut val = val; - if by_ref && !arg.is_indirect() { - // Have to load the argument, maybe while casting it. - if arg.original_ty == Type::i1(bcx.ccx()) { - // We store bools as i8 so we need to truncate to i1. - val = LoadRangeAssert(bcx, val, 0, 2, llvm::False); - val = Trunc(bcx, val, arg.original_ty); - } else if let Some(ty) = arg.cast { - val = Load(bcx, PointerCast(bcx, val, ty.ptr_to())); - if !bcx.unreachable.get() { - let llalign = llalign_of_min(bcx.ccx(), arg.ty); - unsafe { - llvm::LLVMSetAlignment(val, llalign); - } - } - } else { - val = Load(bcx, val); - } - } - - llargs.push(val); - bcx -} diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 3081f055bb..d368ce4743 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -114,37 +114,22 @@ //! code for `expr` itself is responsible for freeing any other byproducts //! that may be in play. -pub use self::ScopeId::*; -pub use self::CleanupScopeKind::*; pub use self::EarlyExitLabel::*; -pub use self::Heap::*; use llvm::{BasicBlockRef, ValueRef}; use base; use build; use common; -use common::{Block, FunctionContext, NodeIdAndSpan, LandingPad}; -use datum::{Datum, Lvalue}; -use debuginfo::{DebugLoc, ToDebugLoc}; +use common::{Block, FunctionContext, LandingPad}; +use debuginfo::{DebugLoc}; use glue; -use middle::region; use type_::Type; use value::Value; -use rustc::ty::{Ty, TyCtxt}; - -use std::fmt; -use syntax::ast; - -pub struct CleanupScope<'blk, 'tcx: 'blk> { - // The id of this cleanup scope. If the id is None, - // this is a *temporary scope* that is pushed during trans to - // cleanup miscellaneous garbage that trans may generate whose - // lifetime is a subset of some expression. See module doc for - // more details. 
- kind: CleanupScopeKind<'blk, 'tcx>, +use rustc::ty::Ty; +pub struct CleanupScope<'tcx> { // Cleanups to run upon scope exit. - cleanups: Vec>, + cleanups: Vec>, // The debug location any drop calls generated for this scope will be // associated with. @@ -159,37 +144,9 @@ pub struct CustomScopeIndex { index: usize } -pub const EXIT_BREAK: usize = 0; -pub const EXIT_LOOP: usize = 1; -pub const EXIT_MAX: usize = 2; - -pub enum CleanupScopeKind<'blk, 'tcx: 'blk> { - CustomScopeKind, - AstScopeKind(ast::NodeId), - LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX]) -} - -impl<'blk, 'tcx: 'blk> fmt::Debug for CleanupScopeKind<'blk, 'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - CustomScopeKind => write!(f, "CustomScopeKind"), - AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid), - LoopScopeKind(nid, ref blks) => { - write!(f, "LoopScopeKind({}, [", nid)?; - for blk in blks { - write!(f, "{:p}, ", blk)?; - } - write!(f, "])") - } - } - } -} - #[derive(Copy, Clone, PartialEq, Debug)] pub enum EarlyExitLabel { UnwindExit(UnwindKind), - ReturnExit, - LoopExit(ast::NodeId, usize) } #[derive(Copy, Clone, Debug)] @@ -205,97 +162,8 @@ pub struct CachedEarlyExit { last_cleanup: usize, } -pub trait Cleanup<'tcx> { - fn must_unwind(&self) -> bool; - fn is_lifetime_end(&self) -> bool; - fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx>; -} - -pub type CleanupObj<'tcx> = Box+'tcx>; - -#[derive(Copy, Clone, Debug)] -pub enum ScopeId { - AstScope(ast::NodeId), - CustomScope(CustomScopeIndex) -} - -#[derive(Copy, Clone, Debug)] -pub struct DropHint(pub ast::NodeId, pub K); - -pub type DropHintDatum<'tcx> = DropHint>; -pub type DropHintValue = DropHint; - -impl DropHint { - pub fn new(id: ast::NodeId, k: K) -> DropHint { DropHint(id, k) } -} - -impl DropHint { - pub fn value(&self) -> ValueRef { self.1 } -} - -pub trait DropHintMethods { - type ValueKind; - fn to_value(&self) -> Self::ValueKind; -} -impl<'tcx> DropHintMethods for DropHintDatum<'tcx> { - type ValueKind = DropHintValue; - fn to_value(&self) -> DropHintValue { DropHint(self.0, self.1.val) } -} - -impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { - /// Invoked when we start to trans the code contained within a new cleanup scope. - fn push_ast_cleanup_scope(&self, debug_loc: NodeIdAndSpan) { - debug!("push_ast_cleanup_scope({})", - self.ccx.tcx().map.node_to_string(debug_loc.id)); - - // FIXME(#2202) -- currently closure bodies have a parent - // region, which messes up the assertion below, since there - // are no cleanup scopes on the stack at the start of - // trans'ing a closure body. I think though that this should - // eventually be fixed by closure bodies not having a parent - // region, though that's a touch unclear, and it might also be - // better just to narrow this assertion more (i.e., by - // excluding id's that correspond to closure bodies only). For - // now we just say that if there is already an AST scope on the stack, - // this new AST scope had better be its immediate child. 
- let top_scope = self.top_ast_scope(); - let region_maps = &self.ccx.tcx().region_maps; - if top_scope.is_some() { - assert!((region_maps - .opt_encl_scope(region_maps.node_extent(debug_loc.id)) - .map(|s|s.node_id(region_maps)) == top_scope) - || - (region_maps - .opt_encl_scope(region_maps.lookup_code_extent( - region::CodeExtentData::DestructionScope(debug_loc.id))) - .map(|s|s.node_id(region_maps)) == top_scope)); - } - - self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id), - debug_loc.debug_loc())); - } - - fn push_loop_cleanup_scope(&self, - id: ast::NodeId, - exits: [Block<'blk, 'tcx>; EXIT_MAX]) { - debug!("push_loop_cleanup_scope({})", - self.ccx.tcx().map.node_to_string(id)); - assert_eq!(Some(id), self.top_ast_scope()); - - // Just copy the debuginfo source location from the enclosing scope - let debug_loc = self.scopes - .borrow() - .last() - .unwrap() - .debug_loc; - - self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc)); - } - - fn push_custom_cleanup_scope(&self) -> CustomScopeIndex { +impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { + pub fn push_custom_cleanup_scope(&self) -> CustomScopeIndex { let index = self.scopes_len(); debug!("push_custom_cleanup_scope(): {}", index); @@ -306,53 +174,14 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { .map(|opt_scope| opt_scope.debug_loc) .unwrap_or(DebugLoc::None); - self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc)); - CustomScopeIndex { index: index } - } - - fn push_custom_cleanup_scope_with_debug_loc(&self, - debug_loc: NodeIdAndSpan) - -> CustomScopeIndex { - let index = self.scopes_len(); - debug!("push_custom_cleanup_scope(): {}", index); - - self.push_scope(CleanupScope::new(CustomScopeKind, - debug_loc.debug_loc())); + self.push_scope(CleanupScope::new(debug_loc)); CustomScopeIndex { index: index } } - /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup - /// stack, and generates the code to do its cleanups for normal exit. - fn pop_and_trans_ast_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, - cleanup_scope: ast::NodeId) - -> Block<'blk, 'tcx> { - debug!("pop_and_trans_ast_cleanup_scope({})", - self.ccx.tcx().map.node_to_string(cleanup_scope)); - - assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope))); - - let scope = self.pop_scope(); - self.trans_scope_cleanups(bcx, &scope) - } - - /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the - /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by - /// branching to a block generated by `normal_exit_block`. - fn pop_loop_cleanup_scope(&self, - cleanup_scope: ast::NodeId) { - debug!("pop_loop_cleanup_scope({})", - self.ccx.tcx().map.node_to_string(cleanup_scope)); - - assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope))); - - let _ = self.pop_scope(); - } - /// Removes the top cleanup scope from the stack without executing its cleanups. The top /// cleanup scope must be the temporary scope `custom_scope`. 
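[Editor's note] With the AST and loop scope kinds removed, cleanup.rs is left with a single push / schedule / pop-and-run discipline over a stack of temporary scopes. A standalone sketch of that shape, using plain closures in place of `DropValue`; the names are illustrative, not from the patch:

```rust
// Illustrative sketch, not compiler code: push a scope, schedule cleanups
// into the top scope, then pop it and run the cleanups in reverse order of
// scheduling (mirroring drop order).
struct CleanupStack {
    scopes: Vec<Vec<Box<dyn FnOnce()>>>,
}

impl CleanupStack {
    fn new() -> Self {
        CleanupStack { scopes: Vec::new() }
    }
    fn push_scope(&mut self) {
        self.scopes.push(Vec::new());
    }
    fn schedule(&mut self, cleanup: Box<dyn FnOnce()>) {
        self.scopes.last_mut().expect("no open scope").push(cleanup);
    }
    fn pop_and_run(&mut self) {
        for cleanup in self.scopes.pop().expect("no open scope").into_iter().rev() {
            cleanup();
        }
    }
}

fn main() {
    let mut stack = CleanupStack::new();
    stack.push_scope();
    stack.schedule(Box::new(|| println!("scheduled first, runs second")));
    stack.schedule(Box::new(|| println!("scheduled second, runs first")));
    stack.pop_and_run();
}
```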
- fn pop_custom_cleanup_scope(&self, - custom_scope: CustomScopeIndex) { + pub fn pop_custom_cleanup_scope(&self, + custom_scope: CustomScopeIndex) { debug!("pop_custom_cleanup_scope({})", custom_scope.index); assert!(self.is_valid_to_pop_custom_scope(custom_scope)); let _ = self.pop_scope(); @@ -360,10 +189,10 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { /// Removes the top cleanup scope from the stack, which must be a temporary scope, and /// generates the code to do its cleanups for normal exit. - fn pop_and_trans_custom_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, - custom_scope: CustomScopeIndex) - -> Block<'blk, 'tcx> { + pub fn pop_and_trans_custom_cleanup_scope(&self, + bcx: Block<'blk, 'tcx>, + custom_scope: CustomScopeIndex) + -> Block<'blk, 'tcx> { debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope); assert!(self.is_valid_to_pop_custom_scope(custom_scope)); @@ -371,100 +200,27 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { self.trans_scope_cleanups(bcx, &scope) } - /// Returns the id of the top-most loop scope - fn top_loop_scope(&self) -> ast::NodeId { - for scope in self.scopes.borrow().iter().rev() { - if let LoopScopeKind(id, _) = scope.kind { - return id; - } - } - bug!("no loop scope found"); - } - - /// Returns a block to branch to which will perform all pending cleanups and - /// then break/continue (depending on `exit`) out of the loop with id - /// `cleanup_scope` - fn normal_exit_block(&'blk self, - cleanup_scope: ast::NodeId, - exit: usize) -> BasicBlockRef { - self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit)) - } - - /// Returns a block to branch to which will perform all pending cleanups and - /// then return from this function - fn return_exit_block(&'blk self) -> BasicBlockRef { - self.trans_cleanups_to_exit_scope(ReturnExit) - } - - fn schedule_lifetime_end(&self, - cleanup_scope: ScopeId, - val: ValueRef) { - let drop = box LifetimeEnd { - ptr: val, - }; - - debug!("schedule_lifetime_end({:?}, val={:?})", - cleanup_scope, Value(val)); - - self.schedule_clean(cleanup_scope, drop as CleanupObj); - } - /// Schedules a (deep) drop of `val`, which is a pointer to an instance of /// `ty` - fn schedule_drop_mem(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>, - drop_hint: Option>) { + pub fn schedule_drop_mem(&self, + cleanup_scope: CustomScopeIndex, + val: ValueRef, + ty: Ty<'tcx>) { if !self.type_needs_drop(ty) { return; } - let drop_hint = drop_hint.map(|hint|hint.to_value()); - let drop = box DropValue { + let drop = DropValue { is_immediate: false, val: val, ty: ty, - fill_on_drop: false, skip_dtor: false, - drop_hint: drop_hint, }; - debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}", + debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) skip_dtor={}", cleanup_scope, Value(val), ty, - drop.fill_on_drop, drop.skip_dtor); - self.schedule_clean(cleanup_scope, drop as CleanupObj); - } - - /// Schedules a (deep) drop and filling of `val`, which is a pointer to an instance of `ty` - fn schedule_drop_and_fill_mem(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>, - drop_hint: Option>) { - if !self.type_needs_drop(ty) { return; } - - let drop_hint = drop_hint.map(|datum|datum.to_value()); - let drop = box DropValue { - is_immediate: false, - val: val, - ty: ty, - fill_on_drop: true, - skip_dtor: false, - drop_hint: drop_hint, - }; - - debug!("schedule_drop_and_fill_mem({:?}, val={:?}, ty={:?}, - fill_on_drop={}, 
skip_dtor={}, has_drop_hint={})", - cleanup_scope, - Value(val), - ty, - drop.fill_on_drop, - drop.skip_dtor, - drop_hint.is_some()); - - self.schedule_clean(cleanup_scope, drop as CleanupObj); + self.schedule_clean(cleanup_scope, drop); } /// Issue #23611: Schedules a (deep) drop of the contents of @@ -472,110 +228,55 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { /// `ty`. The scheduled code handles extracting the discriminant /// and dropping the contents associated with that variant /// *without* executing any associated drop implementation. - fn schedule_drop_adt_contents(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>) { + pub fn schedule_drop_adt_contents(&self, + cleanup_scope: CustomScopeIndex, + val: ValueRef, + ty: Ty<'tcx>) { // `if` below could be "!contents_needs_drop"; skipping drop // is just an optimization, so sound to be conservative. if !self.type_needs_drop(ty) { return; } - let drop = box DropValue { + let drop = DropValue { is_immediate: false, val: val, ty: ty, - fill_on_drop: false, skip_dtor: true, - drop_hint: None, }; - debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}", + debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) skip_dtor={}", cleanup_scope, Value(val), ty, - drop.fill_on_drop, drop.skip_dtor); - self.schedule_clean(cleanup_scope, drop as CleanupObj); + self.schedule_clean(cleanup_scope, drop); } /// Schedules a (deep) drop of `val`, which is an instance of `ty` - fn schedule_drop_immediate(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>) { + pub fn schedule_drop_immediate(&self, + cleanup_scope: CustomScopeIndex, + val: ValueRef, + ty: Ty<'tcx>) { if !self.type_needs_drop(ty) { return; } - let drop = Box::new(DropValue { + let drop = DropValue { is_immediate: true, val: val, ty: ty, - fill_on_drop: false, skip_dtor: false, - drop_hint: None, - }); + }; - debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}", + debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) skip_dtor={}", cleanup_scope, Value(val), ty, - drop.fill_on_drop, drop.skip_dtor); - self.schedule_clean(cleanup_scope, drop as CleanupObj); - } - - /// Schedules a call to `free(val)`. Note that this is a shallow operation. - fn schedule_free_value(&self, - cleanup_scope: ScopeId, - val: ValueRef, - heap: Heap, - content_ty: Ty<'tcx>) { - let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty }; - - debug!("schedule_free_value({:?}, val={:?}, heap={:?})", - cleanup_scope, Value(val), heap); - - self.schedule_clean(cleanup_scope, drop as CleanupObj); - } - - fn schedule_clean(&self, - cleanup_scope: ScopeId, - cleanup: CleanupObj<'tcx>) { - match cleanup_scope { - AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup), - CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup), - } - } - - /// Schedules a cleanup to occur upon exit from `cleanup_scope`. If `cleanup_scope` is not - /// provided, then the cleanup is scheduled in the topmost scope, which must be a temporary - /// scope. 
- fn schedule_clean_in_ast_scope(&self, - cleanup_scope: ast::NodeId, - cleanup: CleanupObj<'tcx>) { - debug!("schedule_clean_in_ast_scope(cleanup_scope={})", - cleanup_scope); - - for scope in self.scopes.borrow_mut().iter_mut().rev() { - if scope.kind.is_ast_with_id(cleanup_scope) { - scope.cleanups.push(cleanup); - scope.cached_landing_pad = None; - return; - } else { - // will be adding a cleanup to some enclosing scope - scope.clear_cached_exits(); - } - } - - bug!("no cleanup scope {} found", - self.ccx.tcx().map.node_to_string(cleanup_scope)); + self.schedule_clean(cleanup_scope, drop); } /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope. - fn schedule_clean_in_custom_scope(&self, - custom_scope: CustomScopeIndex, - cleanup: CleanupObj<'tcx>) { + fn schedule_clean(&self, custom_scope: CustomScopeIndex, cleanup: DropValue<'tcx>) { debug!("schedule_clean_in_custom_scope(custom_scope={})", custom_scope.index); @@ -588,14 +289,14 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { } /// Returns true if there are pending cleanups that should execute on panic. - fn needs_invoke(&self) -> bool { + pub fn needs_invoke(&self) -> bool { self.scopes.borrow().iter().rev().any(|s| s.needs_invoke()) } /// Returns a basic block to branch to in the event of a panic. This block /// will run the panic cleanups and eventually resume the exception that /// caused the landing pad to be run. - fn get_landing_pad(&'blk self) -> BasicBlockRef { + pub fn get_landing_pad(&'blk self) -> BasicBlockRef { let _icx = base::push_ctxt("get_landing_pad"); debug!("get_landing_pad"); @@ -625,25 +326,6 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { return llbb; } -} - -impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { - /// Returns the id of the current top-most AST scope, if any. - fn top_ast_scope(&self) -> Option { - for scope in self.scopes.borrow().iter().rev() { - match scope.kind { - CustomScopeKind | LoopScopeKind(..) 
=> {} - AstScopeKind(i) => { - return Some(i); - } - } - } - None - } - - fn top_nonempty_cleanup_scope(&self) -> Option { - self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty()) - } fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool { self.is_valid_custom_scope(custom_scope) && @@ -652,14 +334,13 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool { let scopes = self.scopes.borrow(); - custom_scope.index < scopes.len() && - (*scopes)[custom_scope.index].kind.is_temp() + custom_scope.index < scopes.len() } /// Generates the cleanups for `scope` into `bcx` fn trans_scope_cleanups(&self, // cannot borrow self, will recurse bcx: Block<'blk, 'tcx>, - scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> { + scope: &CleanupScope<'tcx>) -> Block<'blk, 'tcx> { let mut bcx = bcx; if !bcx.unreachable.get() { @@ -674,11 +355,11 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx self.scopes.borrow().len() } - fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) { + fn push_scope(&self, scope: CleanupScope<'tcx>) { self.scopes.borrow_mut().push(scope) } - fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> { + fn pop_scope(&self) -> CleanupScope<'tcx> { debug!("popping cleanup scope {}, {} scopes remaining", self.top_scope(|s| s.block_name("")), self.scopes_len() - 1); @@ -686,7 +367,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx self.scopes.borrow_mut().pop().unwrap() } - fn top_scope(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R { + fn top_scope(&self, f: F) -> R where F: FnOnce(&CleanupScope<'tcx>) -> R { f(self.scopes.borrow().last().unwrap()) } @@ -738,7 +419,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx UnwindExit(val) => { // Generate a block that will resume unwinding to the // calling function - let bcx = self.new_block("resume", None); + let bcx = self.new_block("resume"); match val { UnwindKind::LandingPad => { let addr = self.landingpad_alloca.get() @@ -755,15 +436,6 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx prev_llbb = bcx.llbb; break; } - - ReturnExit => { - prev_llbb = self.get_llreturn(); - break - } - - LoopExit(id, _) => { - bug!("cannot exit from scope {}, not in scope", id); - } } } @@ -782,20 +454,6 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx skip = last_cleanup; break; } - - // If we are searching for a loop exit, - // and this scope is that loop, then stop popping and set - // `prev_llbb` to the appropriate exit block from the loop. - let scope = popped_scopes.last().unwrap(); - match label { - UnwindExit(..) 
| ReturnExit => { } - LoopExit(id, exit) => { - if let Some(exit) = scope.kind.early_exit_block(id, exit) { - prev_llbb = exit; - break - } - } - } } debug!("trans_cleanups_to_exit_scope: popped {} scopes", @@ -826,7 +484,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx let name = scope.block_name("clean"); debug!("generating cleanups for {}", name); - let bcx_in = self.new_block(&name[..], None); + let bcx_in = self.new_block(&name[..]); let exit_label = label.start(bcx_in); let mut bcx_out = bcx_in; let len = scope.cleanups.len(); @@ -869,7 +527,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx Some(llbb) => return llbb, None => { let name = last_scope.block_name("unwind"); - pad_bcx = self.new_block(&name[..], None); + pad_bcx = self.new_block(&name[..]); last_scope.cached_landing_pad = Some(pad_bcx.llbb); } } @@ -923,12 +581,9 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx } } -impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> { - fn new(kind: CleanupScopeKind<'blk, 'tcx>, - debug_loc: DebugLoc) - -> CleanupScope<'blk, 'tcx> { +impl<'tcx> CleanupScope<'tcx> { + fn new(debug_loc: DebugLoc) -> CleanupScope<'tcx> { CleanupScope { - kind: kind, debug_loc: debug_loc, cleanups: vec!(), cached_early_exits: vec!(), @@ -936,11 +591,6 @@ impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> { } } - fn clear_cached_exits(&mut self) { - self.cached_early_exits = vec!(); - self.cached_landing_pad = None; - } - fn cached_early_exit(&self, label: EarlyExitLabel) -> Option<(BasicBlockRef, usize)> { @@ -961,62 +611,13 @@ impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> { /// True if this scope has cleanups that need unwinding fn needs_invoke(&self) -> bool { - self.cached_landing_pad.is_some() || - self.cleanups.iter().any(|c| c.must_unwind()) + !self.cleanups.is_empty() } /// Returns a suitable name to use for the basic block that handles this cleanup scope fn block_name(&self, prefix: &str) -> String { - match self.kind { - CustomScopeKind => format!("{}_custom_", prefix), - AstScopeKind(id) => format!("{}_ast_{}_", prefix, id), - LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id), - } - } - - /// Manipulate cleanup scope for call arguments. Conceptually, each - /// argument to a call is an lvalue, and performing the call moves each - /// of the arguments into a new rvalue (which gets cleaned up by the - /// callee). As an optimization, instead of actually performing all of - /// those moves, trans just manipulates the cleanup scope to obtain the - /// same effect. - pub fn drop_non_lifetime_clean(&mut self) { - self.cleanups.retain(|c| c.is_lifetime_end()); - self.clear_cached_exits(); - } -} - -impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> { - fn is_temp(&self) -> bool { - match *self { - CustomScopeKind => true, - LoopScopeKind(..) | AstScopeKind(..) => false, - } - } - - fn is_ast_with_id(&self, id: ast::NodeId) -> bool { - match *self { - CustomScopeKind | LoopScopeKind(..) => false, - AstScopeKind(i) => i == id - } - } - - fn is_loop_with_id(&self, id: ast::NodeId) -> bool { - match *self { - CustomScopeKind | AstScopeKind(..) 
=> false, - LoopScopeKind(i, _) => i == id - } - } - - /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None` - fn early_exit_block(&self, - id: ast::NodeId, - exit: usize) -> Option { - match *self { - LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb), - _ => None, - } + format!("{}_custom_", prefix) } } @@ -1057,7 +658,6 @@ impl EarlyExitLabel { bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::gnu()))); *self } - label => label, } } } @@ -1080,20 +680,10 @@ pub struct DropValue<'tcx> { is_immediate: bool, val: ValueRef, ty: Ty<'tcx>, - fill_on_drop: bool, skip_dtor: bool, - drop_hint: Option, } -impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> { - fn must_unwind(&self) -> bool { - true - } - - fn is_lifetime_end(&self) -> bool { - false - } - +impl<'tcx> DropValue<'tcx> { fn trans<'blk>(&self, bcx: Block<'blk, 'tcx>, debug_loc: DebugLoc) @@ -1107,180 +697,8 @@ impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> { let bcx = if self.is_immediate { glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor) } else { - glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor, self.drop_hint) + glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor) }; - if self.fill_on_drop { - base::drop_done_fill_mem(bcx, self.val, self.ty); - } bcx } } - -#[derive(Copy, Clone, Debug)] -pub enum Heap { - HeapExchange -} - -#[derive(Copy, Clone)] -pub struct FreeValue<'tcx> { - ptr: ValueRef, - heap: Heap, - content_ty: Ty<'tcx> -} - -impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> { - fn must_unwind(&self) -> bool { - true - } - - fn is_lifetime_end(&self) -> bool { - false - } - - fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { - match self.heap { - HeapExchange => { - glue::trans_exchange_free_ty(bcx, - self.ptr, - self.content_ty, - debug_loc) - } - } - } -} - -#[derive(Copy, Clone)] -pub struct LifetimeEnd { - ptr: ValueRef, -} - -impl<'tcx> Cleanup<'tcx> for LifetimeEnd { - fn must_unwind(&self) -> bool { - false - } - - fn is_lifetime_end(&self) -> bool { - true - } - - fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { - debug_loc.apply(bcx.fcx); - base::call_lifetime_end(bcx, self.ptr); - bcx - } -} - -pub fn temporary_scope(tcx: TyCtxt, - id: ast::NodeId) - -> ScopeId { - match tcx.region_maps.temporary_scope(id) { - Some(scope) => { - let r = AstScope(scope.node_id(&tcx.region_maps)); - debug!("temporary_scope({}) = {:?}", id, r); - r - } - None => { - bug!("no temporary scope available for expr {}", id) - } - } -} - -pub fn var_scope(tcx: TyCtxt, - id: ast::NodeId) - -> ScopeId { - let r = AstScope(tcx.region_maps.var_scope(id).node_id(&tcx.region_maps)); - debug!("var_scope({}) = {:?}", id, r); - r -} - -/////////////////////////////////////////////////////////////////////////// -// These traits just exist to put the methods into this file. 
- -pub trait CleanupMethods<'blk, 'tcx> { - fn push_ast_cleanup_scope(&self, id: NodeIdAndSpan); - fn push_loop_cleanup_scope(&self, - id: ast::NodeId, - exits: [Block<'blk, 'tcx>; EXIT_MAX]); - fn push_custom_cleanup_scope(&self) -> CustomScopeIndex; - fn push_custom_cleanup_scope_with_debug_loc(&self, - debug_loc: NodeIdAndSpan) - -> CustomScopeIndex; - fn pop_and_trans_ast_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, - cleanup_scope: ast::NodeId) - -> Block<'blk, 'tcx>; - fn pop_loop_cleanup_scope(&self, - cleanup_scope: ast::NodeId); - fn pop_custom_cleanup_scope(&self, - custom_scope: CustomScopeIndex); - fn pop_and_trans_custom_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, - custom_scope: CustomScopeIndex) - -> Block<'blk, 'tcx>; - fn top_loop_scope(&self) -> ast::NodeId; - fn normal_exit_block(&'blk self, - cleanup_scope: ast::NodeId, - exit: usize) -> BasicBlockRef; - fn return_exit_block(&'blk self) -> BasicBlockRef; - fn schedule_lifetime_end(&self, - cleanup_scope: ScopeId, - val: ValueRef); - fn schedule_drop_mem(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>, - drop_hint: Option>); - fn schedule_drop_and_fill_mem(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>, - drop_hint: Option>); - fn schedule_drop_adt_contents(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>); - fn schedule_drop_immediate(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>); - fn schedule_free_value(&self, - cleanup_scope: ScopeId, - val: ValueRef, - heap: Heap, - content_ty: Ty<'tcx>); - fn schedule_clean(&self, - cleanup_scope: ScopeId, - cleanup: CleanupObj<'tcx>); - fn schedule_clean_in_ast_scope(&self, - cleanup_scope: ast::NodeId, - cleanup: CleanupObj<'tcx>); - fn schedule_clean_in_custom_scope(&self, - custom_scope: CustomScopeIndex, - cleanup: CleanupObj<'tcx>); - fn needs_invoke(&self) -> bool; - fn get_landing_pad(&'blk self) -> BasicBlockRef; -} - -trait CleanupHelperMethods<'blk, 'tcx> { - fn top_ast_scope(&self) -> Option; - fn top_nonempty_cleanup_scope(&self) -> Option; - fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool; - fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool; - fn trans_scope_cleanups(&self, - bcx: Block<'blk, 'tcx>, - scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>; - fn trans_cleanups_to_exit_scope(&'blk self, - label: EarlyExitLabel) - -> BasicBlockRef; - fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef; - fn scopes_len(&self) -> usize; - fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>); - fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>; - fn top_scope(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R; -} diff --git a/src/librustc_trans/closure.rs b/src/librustc_trans/closure.rs index 77b2c43167..a1d645fb99 100644 --- a/src/librustc_trans/closure.rs +++ b/src/librustc_trans/closure.rs @@ -9,114 +9,21 @@ // except according to those terms. 
use arena::TypedArena; -use back::symbol_names; -use llvm::{self, ValueRef, get_param, get_params}; +use llvm::{self, ValueRef, get_params}; use rustc::hir::def_id::DefId; use abi::{Abi, FnType}; -use adt; use attributes; use base::*; -use build::*; -use callee::{self, ArgVals, Callee}; -use cleanup::{CleanupMethods, CustomScope, ScopeId}; +use callee::{self, Callee}; use common::*; -use datum::{ByRef, Datum, lvalue_scratch_datum}; -use datum::{rvalue_scratch_datum, Rvalue}; -use debuginfo::{self, DebugLoc}; +use debuginfo::{DebugLoc}; use declare; -use expr; use monomorphize::{Instance}; use value::Value; -use Disr; use rustc::ty::{self, Ty, TyCtxt}; -use session::config::FullDebugInfo; - -use syntax::ast; use rustc::hir; -use libc::c_uint; - -fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - closure_def_id: DefId, - arg_scope_id: ScopeId, - id: ast::NodeId) { - let _icx = push_ctxt("closure::load_closure_environment"); - let kind = kind_for_closure(bcx.ccx(), closure_def_id); - - let env_arg = &bcx.fcx.fn_ty.args[0]; - let mut env_idx = bcx.fcx.fn_ty.ret.is_indirect() as usize; - - // Special case for small by-value selfs. - let llenv = if kind == ty::ClosureKind::FnOnce && !env_arg.is_indirect() { - let closure_ty = node_id_type(bcx, id); - let llenv = rvalue_scratch_datum(bcx, closure_ty, "closure_env").val; - env_arg.store_fn_arg(&bcx.build(), &mut env_idx, llenv); - llenv - } else { - get_param(bcx.fcx.llfn, env_idx as c_uint) - }; - - // Store the pointer to closure data in an alloca for debug info because that's what the - // llvm.dbg.declare intrinsic expects - let env_pointer_alloca = if bcx.sess().opts.debuginfo == FullDebugInfo { - let alloc = alloca(bcx, val_ty(llenv), "__debuginfo_env_ptr"); - Store(bcx, llenv, alloc); - Some(alloc) - } else { - None - }; - - bcx.tcx().with_freevars(id, |fv| { - for (i, freevar) in fv.iter().enumerate() { - let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(), - closure_expr_id: id }; - let upvar_capture = bcx.tcx().upvar_capture(upvar_id).unwrap(); - let mut upvar_ptr = StructGEP(bcx, llenv, i); - let captured_by_ref = match upvar_capture { - ty::UpvarCapture::ByValue => false, - ty::UpvarCapture::ByRef(..) => { - upvar_ptr = Load(bcx, upvar_ptr); - true - } - }; - let node_id = freevar.def.var_id(); - bcx.fcx.llupvars.borrow_mut().insert(node_id, upvar_ptr); - - if kind == ty::ClosureKind::FnOnce && !captured_by_ref { - let hint = bcx.fcx.lldropflag_hints.borrow().hint_datum(upvar_id.var_id); - bcx.fcx.schedule_drop_mem(arg_scope_id, - upvar_ptr, - node_id_type(bcx, node_id), - hint) - } - - if let Some(env_pointer_alloca) = env_pointer_alloca { - debuginfo::create_captured_var_metadata( - bcx, - node_id, - env_pointer_alloca, - i, - captured_by_ref, - freevar.span); - } - } - }) -} - -pub enum ClosureEnv { - NotClosure, - Closure(DefId, ast::NodeId), -} - -impl ClosureEnv { - pub fn load<'blk,'tcx>(self, bcx: Block<'blk, 'tcx>, arg_scope: ScopeId) { - if let ClosureEnv::Closure(def_id, id) = self { - load_closure_environment(bcx, def_id, arg_scope, id); - } - } -} - fn get_self_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, closure_id: DefId, fn_ty: Ty<'tcx>) @@ -154,8 +61,7 @@ fn get_or_create_closure_declaration<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // Compute the rust-call form of the closure call method. 
let sig = &tcx.closure_type(closure_id, substs).sig; - let sig = tcx.erase_late_bound_regions(sig); - let sig = tcx.normalize_associated_type(&sig); + let sig = tcx.erase_late_bound_regions_and_normalize(sig); let closure_type = tcx.mk_closure_from_closure_substs(closure_id, substs); let function_type = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { unsafety: hir::Unsafety::Normal, @@ -181,68 +87,15 @@ fn get_or_create_closure_declaration<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, llfn } -fn translating_closure_body_via_mir_will_fail(ccx: &CrateContext, - closure_def_id: DefId) - -> bool { - let default_to_mir = ccx.sess().opts.debugging_opts.orbit; - let invert = if default_to_mir { "rustc_no_mir" } else { "rustc_mir" }; - let use_mir = default_to_mir ^ ccx.tcx().has_attr(closure_def_id, invert); - - !use_mir -} - pub fn trans_closure_body_via_mir<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, closure_def_id: DefId, closure_substs: ty::ClosureSubsts<'tcx>) { - use syntax::ast::DUMMY_NODE_ID; - use syntax_pos::DUMMY_SP; - use syntax::ptr::P; - - trans_closure_expr(Dest::Ignore(ccx), - &hir::FnDecl { - inputs: P::new(), - output: hir::Return(P(hir::Ty { - id: DUMMY_NODE_ID, - span: DUMMY_SP, - node: hir::Ty_::TyNever, - })), - variadic: false - }, - &hir::Block { - stmts: P::new(), - expr: None, - id: DUMMY_NODE_ID, - rules: hir::DefaultBlock, - span: DUMMY_SP - }, - DUMMY_NODE_ID, - closure_def_id, - closure_substs); -} - -pub enum Dest<'a, 'tcx: 'a> { - SaveIn(Block<'a, 'tcx>, ValueRef), - Ignore(&'a CrateContext<'a, 'tcx>) -} - -pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>, - decl: &hir::FnDecl, - body: &hir::Block, - id: ast::NodeId, - closure_def_id: DefId, // (*) - closure_substs: ty::ClosureSubsts<'tcx>) - -> Option> -{ // (*) Note that in the case of inlined functions, the `closure_def_id` will be the // defid of the closure in its original crate, whereas `id` will be the id of the local // inlined copy. - debug!("trans_closure_expr(id={:?}, closure_def_id={:?}, closure_substs={:?})", - id, closure_def_id, closure_substs); + debug!("trans_closure_body_via_mir(closure_def_id={:?}, closure_substs={:?})", + closure_def_id, closure_substs); - let ccx = match dest { - Dest::SaveIn(bcx, _) => bcx.ccx(), - Dest::Ignore(ccx) => ccx - }; let tcx = ccx.tcx(); let _icx = push_ctxt("closure::trans_closure_expr"); @@ -255,10 +108,10 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>, unsafe { if ccx.sess().target.target.options.allows_weak_linkage { - llvm::LLVMSetLinkage(llfn, llvm::WeakODRLinkage); + llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::WeakODRLinkage); llvm::SetUniqueComdat(ccx.llmod(), llfn); } else { - llvm::LLVMSetLinkage(llfn, llvm::InternalLinkage); + llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage); } } @@ -272,8 +125,7 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>, // of the closure expression. 
let sig = &tcx.closure_type(closure_def_id, closure_substs).sig; - let sig = tcx.erase_late_bound_regions(sig); - let sig = tcx.normalize_associated_type(&sig); + let sig = tcx.erase_late_bound_regions_and_normalize(sig); let closure_type = tcx.mk_closure_from_closure_substs(closure_def_id, closure_substs); @@ -285,57 +137,19 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>, }; trans_closure(ccx, - decl, - body, llfn, Instance::new(closure_def_id, param_substs), - id, &sig, - Abi::RustCall, - ClosureEnv::Closure(closure_def_id, id)); + Abi::RustCall); ccx.instances().borrow_mut().insert(instance, llfn); } - - // Don't hoist this to the top of the function. It's perfectly legitimate - // to have a zero-size closure (in which case dest will be `Ignore`) and - // we must still generate the closure body. - let (mut bcx, dest_addr) = match dest { - Dest::SaveIn(bcx, p) => (bcx, p), - Dest::Ignore(_) => { - debug!("trans_closure_expr() ignoring result"); - return None; - } - }; - - let repr = adt::represent_type(ccx, node_id_type(bcx, id)); - - // Create the closure. - tcx.with_freevars(id, |fv| { - for (i, freevar) in fv.iter().enumerate() { - let datum = expr::trans_var(bcx, freevar.def); - let upvar_slot_dest = adt::trans_field_ptr( - bcx, &repr, adt::MaybeSizedValue::sized(dest_addr), Disr(0), i); - let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(), - closure_expr_id: id }; - match tcx.upvar_capture(upvar_id).unwrap() { - ty::UpvarCapture::ByValue => { - bcx = datum.store_to(bcx, upvar_slot_dest); - } - ty::UpvarCapture::ByRef(..) => { - Store(bcx, datum.to_llref(), upvar_slot_dest); - } - } - } - }); - adt::trans_set_discr(bcx, &repr, dest_addr, Disr(0)); - - Some(bcx) } pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, closure_def_id: DefId, substs: ty::ClosureSubsts<'tcx>, + method_instance: Instance<'tcx>, trait_closure_kind: ty::ClosureKind) -> ValueRef { @@ -347,32 +161,7 @@ pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, if !ccx.sess().target.target.options.allows_weak_linkage && !ccx.sess().opts.single_codegen_unit() { - if let Some(node_id) = ccx.tcx().map.as_local_node_id(closure_def_id) { - // If the closure is defined in the local crate, we can always just - // translate it. - let (decl, body) = match ccx.tcx().map.expect_expr(node_id).node { - hir::ExprClosure(_, ref decl, ref body, _) => (decl, body), - _ => { unreachable!() } - }; - - trans_closure_expr(Dest::Ignore(ccx), - decl, - body, - node_id, - closure_def_id, - substs); - } else { - // If the closure is defined in an upstream crate, we can only - // translate it if MIR-trans is active. - - if translating_closure_body_via_mir_will_fail(ccx, closure_def_id) { - ccx.sess().fatal("You have run into a known limitation of the \ - MingW toolchain. Either compile with -Zorbit or \ - with -Ccodegen-units=1 to work around it."); - } - - trans_closure_body_via_mir(ccx, closure_def_id, substs); - } + trans_closure_body_via_mir(ccx, closure_def_id, substs); } // If the closure is a Fn closure, but a FnOnce is needed (etc), @@ -408,7 +197,7 @@ pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, // fn call_once(mut self, ...) { call_mut(&mut self, ...) } // // These are both the same at trans time. 
- trans_fn_once_adapter_shim(ccx, closure_def_id, substs, llfn) + trans_fn_once_adapter_shim(ccx, closure_def_id, substs, method_instance, llfn) } _ => { bug!("trans_closure_adapter_shim: cannot convert {:?} to {:?}", @@ -422,9 +211,14 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( ccx: &'a CrateContext<'a, 'tcx>, closure_def_id: DefId, substs: ty::ClosureSubsts<'tcx>, + method_instance: Instance<'tcx>, llreffn: ValueRef) -> ValueRef { + if let Some(&llfn) = ccx.instances().borrow().get(&method_instance) { + return llfn; + } + debug!("trans_fn_once_adapter_shim(closure_def_id={:?}, substs={:?}, llreffn={:?})", closure_def_id, substs, Value(llreffn)); @@ -453,8 +247,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( assert_eq!(abi, Abi::RustCall); sig.0.inputs[0] = closure_ty; - let sig = tcx.erase_late_bound_regions(&sig); - let sig = tcx.normalize_associated_type(&sig); + let sig = tcx.erase_late_bound_regions_and_normalize(&sig); let fn_ty = FnType::new(ccx, abi, &sig, &[]); let llonce_fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { @@ -464,36 +257,28 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( })); // Create the by-value helper. - let function_name = - symbol_names::internal_name_from_type_and_suffix(ccx, llonce_fn_ty, "once_shim"); - let lloncefn = declare::declare_fn(ccx, &function_name, llonce_fn_ty); + let function_name = method_instance.symbol_name(ccx.shared()); + let lloncefn = declare::define_internal_fn(ccx, &function_name, llonce_fn_ty); attributes::set_frame_pointer_elimination(ccx, lloncefn); let (block_arena, fcx): (TypedArena<_>, FunctionContext); block_arena = TypedArena::new(); fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, &block_arena); - let mut bcx = fcx.init(false, None); + let mut bcx = fcx.init(false); // the first argument (`self`) will be the (by value) closure env. - let self_scope = fcx.push_custom_cleanup_scope(); - let self_scope_id = CustomScope(self_scope); let mut llargs = get_params(fcx.llfn); let mut self_idx = fcx.fn_ty.ret.is_indirect() as usize; let env_arg = &fcx.fn_ty.args[0]; let llenv = if env_arg.is_indirect() { - Datum::new(llargs[self_idx], closure_ty, Rvalue::new(ByRef)) - .add_clean(&fcx, self_scope_id) + llargs[self_idx] } else { - unpack_datum!(bcx, lvalue_scratch_datum(bcx, closure_ty, "self", - InitAlloca::Dropped, - self_scope_id, |bcx, llval| { - let mut llarg_idx = self_idx; - env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, llval); - bcx.fcx.schedule_lifetime_end(self_scope_id, llval); - bcx - })).val + let scratch = alloc_ty(bcx, closure_ty, "self"); + let mut llarg_idx = self_idx; + env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, scratch); + scratch }; debug!("trans_fn_once_adapter_shim: env={:?}", Value(llenv)); @@ -510,19 +295,25 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( llargs[self_idx] = llenv; } - let dest = - fcx.llretslotptr.get().map( - |_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))); + let dest = fcx.llretslotptr.get(); let callee = Callee { data: callee::Fn(llreffn), ty: llref_fn_ty }; - bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[self_idx..]), dest).bcx; + + // Call the by-ref closure body with `self` in a cleanup scope, + // to drop `self` when the body returns, or in case it unwinds. 
+ let self_scope = fcx.push_custom_cleanup_scope(); + fcx.schedule_drop_mem(self_scope, llenv, closure_ty); + + bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).bcx; fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope); fcx.finish(bcx, DebugLoc::None); + ccx.instances().borrow_mut().insert(method_instance, lloncefn); + lloncefn } diff --git a/src/librustc_trans/collector.rs b/src/librustc_trans/collector.rs index fdd1ee1fba..8112bb8e65 100644 --- a/src/librustc_trans/collector.rs +++ b/src/librustc_trans/collector.rs @@ -153,7 +153,7 @@ //! The collection algorithm handles this more or less transparently. If it is //! about to create a translation item for something with an external `DefId`, //! it will take a look if the MIR for that item is available, and if so just -//! proceed normally. If the MIR is not available, it assumes that that item is +//! proceed normally. If the MIR is not available, it assumes that the item is //! just linked to and no node is created; which is exactly what we want, since //! no machine code should be generated in the current crate for such an item. //! @@ -195,12 +195,13 @@ use rustc::hir::map as hir_map; use rustc::hir::def_id::DefId; use rustc::middle::lang_items::{ExchangeFreeFnLangItem, ExchangeMallocFnLangItem}; use rustc::traits; -use rustc::ty::subst::{self, Substs, Subst}; +use rustc::ty::subst::{Substs, Subst}; use rustc::ty::{self, TypeFoldable, TyCtxt}; use rustc::ty::adjustment::CustomCoerceUnsized; use rustc::mir::repr as mir; use rustc::mir::visit as mir_visit; use rustc::mir::visit::Visitor as MirVisitor; +use rustc::mir::repr::Location; use rustc_const_eval as const_eval; @@ -209,9 +210,8 @@ use errors; use syntax_pos::DUMMY_SP; use base::custom_coerce_unsize_info; use context::SharedCrateContext; -use common::{fulfill_obligation, normalize_and_test_predicates, type_is_sized}; +use common::{fulfill_obligation, type_is_sized}; use glue::{self, DropGlueKind}; -use meth; use monomorphize::{self, Instance}; use util::nodemap::{FnvHashSet, FnvHashMap, DefIdMap}; @@ -400,7 +400,7 @@ fn record_inlining_canditates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, callees: &[TransItem<'tcx>], inlining_map: &mut InliningMap<'tcx>) { let is_inlining_candidate = |trans_item: &TransItem<'tcx>| { - trans_item.is_from_extern_crate() || trans_item.requests_inline(tcx) + trans_item.needs_local_copy(tcx) }; let inlining_candidates = callees.into_iter() @@ -446,7 +446,7 @@ struct MirNeighborCollector<'a, 'tcx: 'a> { impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { - fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>) { + fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) { debug!("visiting rvalue {:?}", *rvalue); match *rvalue { @@ -458,7 +458,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { format!("Could not find MIR for closure: {:?}", def_id) }); - let concrete_substs = monomorphize::apply_param_substs(self.scx.tcx(), + let concrete_substs = monomorphize::apply_param_substs(self.scx, self.param_substs, &substs.func_substs); let concrete_substs = self.scx.tcx().erase_regions(&concrete_substs); @@ -476,11 +476,11 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { // have to instantiate all methods of the trait being cast to, so we // can build the appropriate vtable. 
mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, target_ty) => { - let target_ty = monomorphize::apply_param_substs(self.scx.tcx(), + let target_ty = monomorphize::apply_param_substs(self.scx, self.param_substs, &target_ty); let source_ty = operand.ty(self.mir, self.scx.tcx()); - let source_ty = monomorphize::apply_param_substs(self.scx.tcx(), + let source_ty = monomorphize::apply_param_substs(self.scx, self.param_substs, &source_ty); let (source_ty, target_ty) = find_vtable_types_for_unsizing(self.scx, @@ -496,7 +496,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { self.output); } } - mir::Rvalue::Box(_) => { + mir::Rvalue::Box(..) => { let exchange_malloc_fn_def_id = self.scx .tcx() @@ -507,7 +507,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { assert!(can_have_local_instance(self.scx.tcx(), exchange_malloc_fn_def_id)); let empty_substs = self.scx.empty_substs_for_def_id(exchange_malloc_fn_def_id); let exchange_malloc_fn_trans_item = - create_fn_trans_item(self.scx.tcx(), + create_fn_trans_item(self.scx, exchange_malloc_fn_def_id, empty_substs, self.param_substs); @@ -517,19 +517,20 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { _ => { /* not interesting */ } } - self.super_rvalue(rvalue); + self.super_rvalue(rvalue, location); } fn visit_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>, - context: mir_visit::LvalueContext) { + context: mir_visit::LvalueContext<'tcx>, + location: Location) { debug!("visiting lvalue {:?}", *lvalue); if let mir_visit::LvalueContext::Drop = context { let ty = lvalue.ty(self.mir, self.scx.tcx()) .to_ty(self.scx.tcx()); - let ty = monomorphize::apply_param_substs(self.scx.tcx(), + let ty = monomorphize::apply_param_substs(self.scx, self.param_substs, &ty); assert!(ty.is_normalized_for_trans()); @@ -537,10 +538,10 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty))); } - self.super_lvalue(lvalue, context); + self.super_lvalue(lvalue, context, location); } - fn visit_operand(&mut self, operand: &mir::Operand<'tcx>) { + fn visit_operand(&mut self, operand: &mir::Operand<'tcx>, location: Location) { debug!("visiting operand {:?}", *operand); let callee = match *operand { @@ -553,7 +554,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { // references to `const` items if let mir::Literal::Item { def_id, substs } = constant.literal { let tcx = self.scx.tcx(); - let substs = monomorphize::apply_param_substs(tcx, + let substs = monomorphize::apply_param_substs(self.scx, self.param_substs, &substs); @@ -611,7 +612,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { // result in a translation item ... if can_result_in_trans_item(self.scx.tcx(), callee_def_id) { // ... and create one if it does. 
- let trans_item = create_fn_trans_item(self.scx.tcx(), + let trans_item = create_fn_trans_item(self.scx, callee_def_id, callee_substs, self.param_substs); @@ -620,30 +621,28 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { } } - self.super_operand(operand); + self.super_operand(operand, location); fn can_result_in_trans_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> bool { - if !match tcx.lookup_item_type(def_id).ty.sty { - ty::TyFnDef(def_id, _, _) => { + match tcx.lookup_item_type(def_id).ty.sty { + ty::TyFnDef(def_id, _, f) => { // Some constructors also have type TyFnDef but they are // always instantiated inline and don't result in // translation item. Same for FFI functions. - match tcx.map.get_if_local(def_id) { - Some(hir_map::NodeVariant(_)) | - Some(hir_map::NodeStructCtor(_)) | - Some(hir_map::NodeForeignItem(_)) => false, - Some(_) => true, - None => { - tcx.sess.cstore.variant_kind(def_id).is_none() + if let Some(hir_map::NodeForeignItem(_)) = tcx.map.get_if_local(def_id) { + return false; + } + + if let Some(adt_def) = f.sig.output().skip_binder().ty_adt_def() { + if adt_def.variants.iter().any(|v| def_id == v.did) { + return false; } } } - ty::TyClosure(..) => true, - _ => false - } { - return false; + ty::TyClosure(..) => {} + _ => return false } can_have_local_instance(tcx, def_id) @@ -654,7 +653,8 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { // we would not register drop-glues. fn visit_terminator_kind(&mut self, block: mir::BasicBlock, - kind: &mir::TerminatorKind<'tcx>) { + kind: &mir::TerminatorKind<'tcx>, + location: Location) { let tcx = self.scx.tcx(); match *kind { mir::TerminatorKind::Call { @@ -667,7 +667,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { if is_drop_in_place_intrinsic(tcx, def_id, bare_fn_ty) => { let operand_ty = args[0].ty(self.mir, tcx); if let ty::TyRawPtr(mt) = operand_ty.sty { - let operand_ty = monomorphize::apply_param_substs(tcx, + let operand_ty = monomorphize::apply_param_substs(self.scx, self.param_substs, &mt.ty); let ty = glue::get_drop_glue_type(tcx, operand_ty); @@ -682,7 +682,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { _ => { /* Nothing to do. */ } } - self.super_terminator_kind(block, kind); + self.super_terminator_kind(block, kind, location); fn is_drop_in_place_intrinsic<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, @@ -729,10 +729,10 @@ fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, assert!(can_have_local_instance(scx.tcx(), exchange_free_fn_def_id)); let fn_substs = scx.empty_substs_for_def_id(exchange_free_fn_def_id); let exchange_free_fn_trans_item = - create_fn_trans_item(scx.tcx(), + create_fn_trans_item(scx, exchange_free_fn_def_id, fn_substs, - scx.tcx().mk_substs(Substs::empty())); + Substs::empty(scx.tcx())); output.push(exchange_free_fn_trans_item); } @@ -740,8 +740,7 @@ fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, // If the type implements Drop, also add a translation item for the // monomorphized Drop::drop() implementation. 
let destructor_did = match ty.sty { - ty::TyStruct(def, _) | - ty::TyEnum(def, _) => def.destructor(), + ty::TyAdt(def, _) => def.destructor(), _ => None }; @@ -753,8 +752,7 @@ fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, .drop_trait() .unwrap(); - let self_type_substs = scx.tcx().mk_substs( - Substs::empty().with_self_ty(ty)); + let self_type_substs = Substs::new_trait(scx.tcx(), ty, &[]); let trait_ref = ty::TraitRef { def_id: drop_trait_def_id, @@ -767,10 +765,10 @@ fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, }; if can_have_local_instance(scx.tcx(), destructor_did) { - let trans_item = create_fn_trans_item(scx.tcx(), + let trans_item = create_fn_trans_item(scx, destructor_did, substs, - scx.tcx().mk_substs(Substs::empty())); + Substs::empty(scx.tcx())); output.push(trans_item); } @@ -795,10 +793,9 @@ fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, ty::TyTrait(_) => { /* nothing to do */ } - ty::TyStruct(ref adt_def, substs) | - ty::TyEnum(ref adt_def, substs) => { + ty::TyAdt(adt_def, substs) => { for field in adt_def.all_fields() { - let field_type = monomorphize::apply_param_substs(scx.tcx(), + let field_type = monomorphize::apply_param_substs(scx, substs, &field.unsubst_ty()); let field_type = glue::get_drop_glue_type(scx.tcx(), field_type); @@ -854,26 +851,15 @@ fn do_static_dispatch<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, fn_substs, param_substs); - let is_trait_method = scx.tcx().trait_of_item(fn_def_id).is_some(); - - if is_trait_method { + if let Some(trait_def_id) = scx.tcx().trait_of_item(fn_def_id) { match scx.tcx().impl_or_trait_item(fn_def_id) { ty::MethodTraitItem(ref method) => { - match method.container { - ty::TraitContainer(trait_def_id) => { - debug!(" => trait method, attempting to find impl"); - do_static_trait_method_dispatch(scx, - method, - trait_def_id, - fn_substs, - param_substs) - } - ty::ImplContainer(_) => { - // This is already a concrete implementation - debug!(" => impl method"); - Some((fn_def_id, fn_substs)) - } - } + debug!(" => trait method, attempting to find impl"); + do_static_trait_method_dispatch(scx, + method, + trait_def_id, + fn_substs, + param_substs) } _ => bug!() } @@ -903,28 +889,17 @@ fn do_static_trait_method_dispatch<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, callee_substs, param_substs); - let rcvr_substs = monomorphize::apply_param_substs(tcx, + let rcvr_substs = monomorphize::apply_param_substs(scx, param_substs, &callee_substs); - - let trait_ref = ty::Binder(rcvr_substs.to_trait_ref(tcx, trait_id)); - let trait_ref = tcx.normalize_associated_type(&trait_ref); - let vtbl = fulfill_obligation(scx, DUMMY_SP, trait_ref); + let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs); + let vtbl = fulfill_obligation(scx, DUMMY_SP, ty::Binder(trait_ref)); // Now that we know which impl is being used, we can dispatch to // the actual function: match vtbl { - traits::VtableImpl(traits::VtableImplData { - impl_def_id: impl_did, - substs: impl_substs, - nested: _ }) => - { - let callee_substs = impl_substs.with_method_from(&rcvr_substs); - let impl_method = meth::get_impl_method(tcx, - impl_did, - tcx.mk_substs(callee_substs), - trait_method.name); - Some((impl_method.method.def_id, &impl_method.substs)) + traits::VtableImpl(impl_data) => { + Some(traits::find_method(tcx, trait_method.name, rcvr_substs, &impl_data)) } // If we have a closure or a function pointer, we will also encounter // the concrete closure/function somewhere else 
(during closure or fn @@ -998,8 +973,8 @@ fn find_vtable_types_for_unsizing<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, } } - (&ty::TyStruct(source_adt_def, source_substs), - &ty::TyStruct(target_adt_def, target_substs)) => { + (&ty::TyAdt(source_adt_def, source_substs), + &ty::TyAdt(target_adt_def, target_substs)) => { assert_eq!(source_adt_def, target_adt_def); let kind = custom_coerce_unsize_info(scx, source_ty, target_ty); @@ -1026,11 +1001,13 @@ fn find_vtable_types_for_unsizing<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, } } -fn create_fn_trans_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, +fn create_fn_trans_item<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, def_id: DefId, fn_substs: &'tcx Substs<'tcx>, param_substs: &'tcx Substs<'tcx>) -> TransItem<'tcx> { + let tcx = scx.tcx(); + debug!("create_fn_trans_item(def_id={}, fn_substs={:?}, param_substs={:?})", def_id_to_string(tcx, def_id), fn_substs, @@ -1039,7 +1016,7 @@ fn create_fn_trans_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // We only get here, if fn_def_id either designates a local item or // an inlineable external item. Non-inlineable external items are // ignored because we don't want to generate any code for them. - let concrete_substs = monomorphize::apply_param_substs(tcx, + let concrete_substs = monomorphize::apply_param_substs(scx, param_substs, &fn_substs); assert!(concrete_substs.is_normalized_for_trans(), @@ -1057,44 +1034,20 @@ fn create_trans_items_for_vtable_methods<'a, 'tcx>(scx: &SharedCrateContext<'a, assert!(!trait_ty.needs_subst() && !impl_ty.needs_subst()); if let ty::TyTrait(ref trait_ty) = trait_ty.sty { - let poly_trait_ref = trait_ty.principal_trait_ref_with_self_ty(scx.tcx(), - impl_ty); + let poly_trait_ref = trait_ty.principal.with_self_ty(scx.tcx(), impl_ty); + let param_substs = Substs::empty(scx.tcx()); // Walk all methods of the trait, including those of its supertraits - for trait_ref in traits::supertraits(scx.tcx(), poly_trait_ref) { - let vtable = fulfill_obligation(scx, DUMMY_SP, trait_ref); - match vtable { - traits::VtableImpl( - traits::VtableImplData { - impl_def_id, - substs, - nested: _ }) => { - let items = meth::get_vtable_methods(scx.tcx(), impl_def_id, substs) - .into_iter() - // filter out None values - .filter_map(|opt_impl_method| opt_impl_method) - // create translation items - .filter_map(|impl_method| { - if can_have_local_instance(scx.tcx(), impl_method.method.def_id) { - Some(create_fn_trans_item(scx.tcx(), - impl_method.method.def_id, - impl_method.substs, - scx.tcx().mk_substs(Substs::empty()))) - } else { - None - } - }); - - output.extend(items); - - // Also add the destructor - let dg_type = glue::get_drop_glue_type(scx.tcx(), - trait_ref.self_ty()); - output.push(TransItem::DropGlue(DropGlueKind::Ty(dg_type))); - } - _ => { /* */ } - } - } + let methods = traits::get_vtable_methods(scx.tcx(), poly_trait_ref); + let methods = methods.filter_map(|method| method) + .filter_map(|(def_id, substs)| do_static_dispatch(scx, def_id, substs, param_substs)) + .filter(|&(def_id, _)| can_have_local_instance(scx.tcx(), def_id)) + .map(|(def_id, substs)| create_fn_trans_item(scx, def_id, substs, param_substs)); + output.extend(methods); + + // Also add the destructor + let dg_type = glue::get_drop_glue_type(scx.tcx(), impl_ty); + output.push(TransItem::DropGlue(DropGlueKind::Ty(dg_type))); } } @@ -1127,14 +1080,15 @@ impl<'b, 'a, 'v> hir_visit::Visitor<'v> for RootCollector<'b, 'a, 'v> { hir::ItemImpl(..) 
=> { if self.mode == TransItemCollectionMode::Eager { - create_trans_items_for_default_impls(self.scx.tcx(), + create_trans_items_for_default_impls(self.scx, item, self.output); } } - hir::ItemEnum(_, ref generics) | - hir::ItemStruct(_, ref generics) => { + hir::ItemEnum(_, ref generics) | + hir::ItemStruct(_, ref generics) | + hir::ItemUnion(_, ref generics) => { if !generics.is_parameterized() { let ty = { let tables = self.scx.tcx().tables.borrow(); @@ -1161,7 +1115,7 @@ impl<'b, 'a, 'v> hir_visit::Visitor<'v> for RootCollector<'b, 'a, 'v> { // const items only generate translation items if they are // actually used somewhere. Just declaring them is insufficient. } - hir::ItemFn(_, _, _, _, ref generics, _) => { + hir::ItemFn(.., ref generics, _) => { if !generics.is_type_parameterized() { let def_id = self.scx.tcx().map.local_def_id(item.id); @@ -1188,7 +1142,7 @@ impl<'b, 'a, 'v> hir_visit::Visitor<'v> for RootCollector<'b, 'a, 'v> { let parent_node_id = hir_map.get_parent_node(ii.id); let is_impl_generic = match hir_map.expect_item(parent_node_id) { &hir::Item { - node: hir::ItemImpl(_, _, ref generics, _, _, _), + node: hir::ItemImpl(_, _, ref generics, ..), .. } => { generics.is_type_parameterized() @@ -1215,15 +1169,15 @@ impl<'b, 'a, 'v> hir_visit::Visitor<'v> for RootCollector<'b, 'a, 'v> { } } -fn create_trans_items_for_default_impls<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, +fn create_trans_items_for_default_impls<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, item: &'tcx hir::Item, output: &mut Vec>) { + let tcx = scx.tcx(); match item.node { hir::ItemImpl(_, _, ref generics, - _, - _, + .., ref items) => { if generics.is_type_parameterized() { return @@ -1235,40 +1189,45 @@ fn create_trans_items_for_default_impls<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id_to_string(tcx, impl_def_id)); if let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) { - let default_impls = tcx.provided_trait_methods(trait_ref.def_id); let callee_substs = tcx.erase_regions(&trait_ref.substs); let overridden_methods: FnvHashSet<_> = items.iter() .map(|item| item.name) .collect(); - for default_impl in default_impls { - if overridden_methods.contains(&default_impl.name) { + for method in tcx.provided_trait_methods(trait_ref.def_id) { + if overridden_methods.contains(&method.name) { continue; } - if default_impl.generics.has_type_params(subst::FnSpace) { + if !method.generics.types.is_empty() { continue; } // The substitutions we have are on the impl, so we grab // the method type from the impl to substitute into. 
- let mth = meth::get_impl_method(tcx, - impl_def_id, - callee_substs, - default_impl.name); - - assert!(mth.is_provided); - - let predicates = mth.method.predicates.predicates.subst(tcx, &mth.substs); - if !normalize_and_test_predicates(tcx, predicates.into_vec()) { + let impl_substs = Substs::for_item(tcx, impl_def_id, + |_, _| tcx.mk_region(ty::ReErased), + |_, _| tcx.types.err); + let impl_data = traits::VtableImplData { + impl_def_id: impl_def_id, + substs: impl_substs, + nested: vec![] + }; + let (def_id, substs) = traits::find_method(tcx, + method.name, + callee_substs, + &impl_data); + + let predicates = tcx.lookup_predicates(def_id).predicates + .subst(tcx, substs); + if !traits::normalize_and_test_predicates(tcx, predicates) { continue; } - if can_have_local_instance(tcx, default_impl.def_id) { - let empty_substs = tcx.erase_regions(&mth.substs); - let item = create_fn_trans_item(tcx, - default_impl.def_id, + if can_have_local_instance(tcx, method.def_id) { + let item = create_fn_trans_item(scx, + method.def_id, callee_substs, - empty_substs); + tcx.erase_regions(&substs)); output.push(item); } } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 79cf77cd9d..5b1f691af8 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -16,7 +16,6 @@ use session::Session; use llvm; use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind}; use llvm::{True, False, Bool, OperandBundleDef}; -use rustc::cfg; use rustc::hir::def::Def; use rustc::hir::def_id::DefId; use rustc::infer::TransNormalize; @@ -30,7 +29,6 @@ use builder::Builder; use callee::Callee; use cleanup; use consts; -use datum; use debuginfo::{self, DebugLoc}; use declare; use machine; @@ -43,7 +41,6 @@ use rustc::ty::layout::Layout; use rustc::traits::{self, SelectionContext, Reveal}; use rustc::ty::fold::TypeFoldable; use rustc::hir; -use util::nodemap::NodeMap; use arena::TypedArena; use libc::{c_uint, c_char}; @@ -91,8 +88,7 @@ pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - return false; } match ty.sty { - ty::TyStruct(..) | ty::TyEnum(..) | ty::TyTuple(..) | ty::TyArray(_, _) | - ty::TyClosure(..) => { + ty::TyAdt(..) | ty::TyTuple(..) | ty::TyArray(..) | ty::TyClosure(..) => { let llty = sizing_type_of(ccx, ty); llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type()) } @@ -104,7 +100,7 @@ pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - pub fn type_pair_fields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Option<[Ty<'tcx>; 2]> { match ty.sty { - ty::TyEnum(adt, substs) | ty::TyStruct(adt, substs) => { + ty::TyAdt(adt, substs) => { assert_eq!(adt.variants.len(), 1); let fields = &adt.variants[0].fields; if fields.len() != 2 { @@ -127,18 +123,7 @@ pub fn type_pair_fields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) /// Returns true if the type is represented as a pair of immediates. pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - let tcx = ccx.tcx(); - let layout = tcx.normalizing_infer_ctxt(Reveal::All).enter(|infcx| { - match ty.layout(&infcx) { - Ok(layout) => layout, - Err(err) => { - bug!("type_is_imm_pair: layout for `{:?}` failed: {}", - ty, err); - } - } - }); - - match *layout { + match *ccx.layout_of(ty) { Layout::FatPointer { .. } => true, Layout::Univariant { ref variant, .. } => { // There must be only 2 fields. 
@@ -165,15 +150,6 @@ pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - llsize_of_alloc(ccx, llty) == 0 } -/// Generates a unique symbol based off the name given. This is used to create -/// unique symbols for things like closures. -pub fn gensym_name(name: &str) -> ast::Name { - let num = token::gensym(name).0; - // use one colon which will get translated to a period by the mangler, and - // we're guaranteed that `num` is globally unique for this crate. - token::gensym(&format!("{}:{}", name, num)) -} - /* * A note on nomenclature of linking: "extern", "foreign", and "upcall". * @@ -202,16 +178,6 @@ pub fn gensym_name(name: &str) -> ast::Name { use Disr; -#[derive(Copy, Clone)] -pub struct NodeIdAndSpan { - pub id: ast::NodeId, - pub span: Span, -} - -pub fn expr_info(expr: &hir::Expr) -> NodeIdAndSpan { - NodeIdAndSpan { id: expr.id, span: expr.span } -} - /// The concrete version of ty::FieldDef. The name is the field index if /// the field is numeric. pub struct Field<'tcx>(pub ast::Name, pub Ty<'tcx>); @@ -229,7 +195,7 @@ impl<'a, 'tcx> VariantInfo<'tcx> { -> Self { match ty.sty { - ty::TyStruct(adt, substs) | ty::TyEnum(adt, substs) => { + ty::TyAdt(adt, substs) => { let variant = match opt_def { None => adt.struct_variant(), Some(def) => adt.variant_of_def(def) @@ -257,17 +223,6 @@ impl<'a, 'tcx> VariantInfo<'tcx> { } } } - - /// Return the variant corresponding to a given node (e.g. expr) - pub fn of_node(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, id: ast::NodeId) -> Self { - Self::from_ty(tcx, ty, Some(tcx.expect_def(id))) - } - - pub fn field_index(&self, name: ast::Name) -> usize { - self.fields.iter().position(|&Field(n,_)| n == name).unwrap_or_else(|| { - bug!("unknown field `{}`", name) - }) - } } pub struct BuilderRef_res { @@ -289,38 +244,7 @@ pub fn BuilderRef_res(b: BuilderRef) -> BuilderRef_res { } pub fn validate_substs(substs: &Substs) { - assert!(!substs.types.needs_infer()); -} - -// work around bizarre resolve errors -type RvalueDatum<'tcx> = datum::Datum<'tcx, datum::Rvalue>; -pub type LvalueDatum<'tcx> = datum::Datum<'tcx, datum::Lvalue>; - -#[derive(Clone, Debug)] -struct HintEntry<'tcx> { - // The datum for the dropflag-hint itself; note that many - // source-level Lvalues will be associated with the same - // dropflag-hint datum. - datum: cleanup::DropHintDatum<'tcx>, -} - -pub struct DropFlagHintsMap<'tcx> { - // Maps NodeId for expressions that read/write unfragmented state - // to that state's drop-flag "hint." (A stack-local hint - // indicates either that (1.) it is certain that no-drop is - // needed, or (2.) inline drop-flag must be consulted.) - node_map: NodeMap>, -} - -impl<'tcx> DropFlagHintsMap<'tcx> { - pub fn new() -> DropFlagHintsMap<'tcx> { DropFlagHintsMap { node_map: NodeMap() } } - pub fn has_hint(&self, id: ast::NodeId) -> bool { self.node_map.contains_key(&id) } - pub fn insert(&mut self, id: ast::NodeId, datum: cleanup::DropHintDatum<'tcx>) { - self.node_map.insert(id, HintEntry { datum: datum }); - } - pub fn hint_datum(&self, id: ast::NodeId) -> Option> { - self.node_map.get(&id).map(|t|t.datum) - } + assert!(!substs.needs_infer()); } // Function context. Every LLVM function we create will have one of @@ -352,12 +276,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // A marker for the place where we want to insert the function's static // allocas, so that LLVM will coalesce them into a single alloca call. 
pub alloca_insert_pt: Cell>, - pub llreturn: Cell>, - - // If the function has any nested return's, including something like: - // fn foo() -> Option { Some(Foo { x: return None }) }, then - // we use a separate alloca for each return - pub needs_ret_allocas: bool, // When working with landingpad-based exceptions this value is alloca'd and // later loaded when using the resume instruction. This ends up being @@ -367,17 +285,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // Note that for cleanuppad-based exceptions this is not used. pub landingpad_alloca: Cell>, - // Maps the DefId's for local variables to the allocas created for - // them in llallocas. - pub lllocals: RefCell>>, - - // Same as above, but for closure upvars - pub llupvars: RefCell>, - - // Carries info about drop-flags for local bindings (longer term, - // paths) for the code being compiled. - pub lldropflag_hints: RefCell>, - // Describes the return/argument LLVM types and their ABI handling. pub fn_ty: FnType, @@ -402,9 +309,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> { pub debug_context: debuginfo::FunctionDebugContext, // Cleanup scopes. - pub scopes: RefCell>>, - - pub cfg: Option, + pub scopes: RefCell>>, } impl<'a, 'tcx> FunctionContext<'a, 'tcx> { @@ -420,74 +325,22 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { } } - pub fn get_llreturn(&self) -> BasicBlockRef { - if self.llreturn.get().is_none() { - - self.llreturn.set(Some(unsafe { - llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), self.llfn, - "return\0".as_ptr() as *const _) - })) - } - - self.llreturn.get().unwrap() - } - - pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>, name: &str) -> ValueRef { - if self.needs_ret_allocas { - base::alloca(bcx, self.fn_ty.ret.memory_ty(self.ccx), name) - } else { - self.llretslotptr.get().unwrap() - } - } - pub fn new_block(&'a self, - name: &str, - opt_node_id: Option) + name: &str) -> Block<'a, 'tcx> { unsafe { let name = CString::new(name).unwrap(); let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), self.llfn, name.as_ptr()); - BlockS::new(llbb, opt_node_id, self) - } - } - - pub fn new_id_block(&'a self, - name: &str, - node_id: ast::NodeId) - -> Block<'a, 'tcx> { - self.new_block(name, Some(node_id)) - } - - pub fn new_temp_block(&'a self, - name: &str) - -> Block<'a, 'tcx> { - self.new_block(name, None) - } - - pub fn join_blocks(&'a self, - id: ast::NodeId, - in_cxs: &[Block<'a, 'tcx>]) - -> Block<'a, 'tcx> { - let out = self.new_id_block("join", id); - let mut reachable = false; - for bcx in in_cxs { - if !bcx.unreachable.get() { - build::Br(*bcx, out.llbb, DebugLoc::None); - reachable = true; - } - } - if !reachable { - build::Unreachable(out); + BlockS::new(llbb, self) } - return out; } pub fn monomorphize(&self, value: &T) -> T where T: TransNormalize<'tcx> { - monomorphize::apply_param_substs(self.ccx.tcx(), + monomorphize::apply_param_substs(self.ccx.shared(), self.param_substs, value) } @@ -523,7 +376,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { let tcx = ccx.tcx(); match tcx.lang_items.eh_personality() { Some(def_id) if !base::wants_msvc_seh(ccx.sess()) => { - Callee::def(ccx, def_id, tcx.mk_substs(Substs::empty())).reify(ccx).val + Callee::def(ccx, def_id, Substs::empty(tcx)).reify(ccx) } _ => { if let Some(llpersonality) = ccx.eh_personality().get() { @@ -550,7 +403,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { let tcx = ccx.tcx(); assert!(ccx.sess().target.target.options.custom_unwind_resume); if let Some(def_id) = tcx.lang_items.eh_unwind_resume() { - return Callee::def(ccx, def_id, 
tcx.mk_substs(Substs::empty())); + return Callee::def(ccx, def_id, Substs::empty(tcx)); } let ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { @@ -565,12 +418,12 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { let unwresume = ccx.eh_unwind_resume(); if let Some(llfn) = unwresume.get() { - return Callee::ptr(datum::immediate_rvalue(llfn, ty)); + return Callee::ptr(llfn, ty); } let llfn = declare::declare_fn(ccx, "rust_eh_unwind_resume", ty); attributes::unwind(llfn, true); unwresume.set(Some(llfn)); - Callee::ptr(datum::immediate_rvalue(llfn, ty)) + Callee::ptr(llfn, ty) } } @@ -593,10 +446,6 @@ pub struct BlockS<'blk, 'tcx: 'blk> { // kind of landing pad its in, otherwise this is none. pub lpad: Cell>, - // AST node-id associated with this block, if any. Used for - // debugging purposes only. - pub opt_node_id: Option, - // The function context for the function to which this block is // attached. pub fcx: &'blk FunctionContext<'blk, 'tcx>, @@ -606,7 +455,6 @@ pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>; impl<'blk, 'tcx> BlockS<'blk, 'tcx> { pub fn new(llbb: BasicBlockRef, - opt_node_id: Option, fcx: &'blk FunctionContext<'blk, 'tcx>) -> Block<'blk, 'tcx> { fcx.block_arena.alloc(BlockS { @@ -614,7 +462,6 @@ impl<'blk, 'tcx> BlockS<'blk, 'tcx> { terminated: Cell::new(false), unreachable: Cell::new(false), lpad: Cell::new(None), - opt_node_id: opt_node_id, fcx: fcx }) } @@ -662,7 +509,7 @@ impl<'blk, 'tcx> BlockS<'blk, 'tcx> { pub fn monomorphize(&self, value: &T) -> T where T: TransNormalize<'tcx> { - monomorphize::apply_param_substs(self.tcx(), + monomorphize::apply_param_substs(self.fcx.ccx.shared(), self.fcx.param_substs, value) } @@ -883,13 +730,6 @@ pub fn C_integral(t: Type, u: u64, sign_extend: bool) -> ValueRef { } } -pub fn C_floating(s: &str, t: Type) -> ValueRef { - unsafe { - let s = CString::new(s).unwrap(); - llvm::LLVMConstRealOfString(t.to_ref(), s.as_ptr()) - } -} - pub fn C_floating_f64(f: f64, t: Type) -> ValueRef { unsafe { llvm::LLVMConstReal(t.to_ref(), f) @@ -916,19 +756,6 @@ pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef { C_integral(Type::i64(ccx), i, false) } -pub fn C_int(ccx: &CrateContext, i: I) -> ValueRef { - let v = i.as_i64(); - - let bit_size = machine::llbitsize_of_real(ccx, ccx.int_type()); - - if bit_size < 64 { - // make sure it doesn't overflow - assert!(v < (1<<(bit_size-1)) && v >= -(1<<(bit_size-1))); - } - - C_integral(ccx.int_type(), v as u64, true) -} - pub fn C_uint(ccx: &CrateContext, i: I) -> ValueRef { let v = i.as_u64(); @@ -980,7 +807,7 @@ pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> Va }); llvm::LLVMSetInitializer(g, sc); llvm::LLVMSetGlobalConstant(g, True); - llvm::LLVMSetLinkage(g, llvm::InternalLinkage); + llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); cx.const_cstr_cache().borrow_mut().insert(s, g); g @@ -1048,12 +875,6 @@ pub fn const_get_elt(v: ValueRef, us: &[c_uint]) } } -pub fn const_to_int(v: ValueRef) -> i64 { - unsafe { - llvm::LLVMConstIntGetSExtValue(v) - } -} - pub fn const_to_uint(v: ValueRef) -> u64 { unsafe { llvm::LLVMConstIntGetZExtValue(v) @@ -1099,24 +920,6 @@ pub fn is_null(val: ValueRef) -> bool { } } -pub fn monomorphize_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> { - bcx.fcx.monomorphize(&t) -} - -pub fn node_id_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, id: ast::NodeId) -> Ty<'tcx> { - let tcx = bcx.tcx(); - let t = tcx.node_id_to_type(id); - monomorphize_type(bcx, t) -} - -pub fn expr_ty<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: 
&hir::Expr) -> Ty<'tcx> { - node_id_type(bcx, ex.id) -} - -pub fn expr_ty_adjusted<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> { - monomorphize_type(bcx, bcx.tcx().expr_ty_adjusted(ex)) -} - /// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we /// do not (necessarily) resolve all nested obligations on the impl. Note that type check should /// guarantee to us that all nested obligations *could be* resolved if we wanted to. @@ -1136,7 +939,7 @@ pub fn fulfill_obligation<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, // Do the initial selection for the obligation. This yields the // shallow result we are looking for -- that is, what specific impl. - tcx.normalizing_infer_ctxt(Reveal::All).enter(|infcx| { + tcx.infer_ctxt(None, None, Reveal::All).enter(|infcx| { let mut selcx = SelectionContext::new(&infcx); let obligation_cause = traits::ObligationCause::misc(span, @@ -1184,35 +987,6 @@ pub fn fulfill_obligation<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, }) } -/// Normalizes the predicates and checks whether they hold. If this -/// returns false, then either normalize encountered an error or one -/// of the predicates did not hold. Used when creating vtables to -/// check for unsatisfiable methods. -pub fn normalize_and_test_predicates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - predicates: Vec>) - -> bool -{ - debug!("normalize_and_test_predicates(predicates={:?})", - predicates); - - tcx.normalizing_infer_ctxt(Reveal::All).enter(|infcx| { - let mut selcx = SelectionContext::new(&infcx); - let mut fulfill_cx = traits::FulfillmentContext::new(); - let cause = traits::ObligationCause::dummy(); - let traits::Normalized { value: predicates, obligations } = - traits::normalize(&mut selcx, cause.clone(), &predicates); - for obligation in obligations { - fulfill_cx.register_predicate_obligation(&infcx, obligation); - } - for predicate in predicates { - let obligation = traits::Obligation::new(cause.clone(), predicate); - fulfill_cx.register_predicate_obligation(&infcx, obligation); - } - - infcx.drain_fulfillment_cx(&mut fulfill_cx, &()).is_ok() - }) -} - pub fn langcall(tcx: TyCtxt, span: Option, msg: &str, @@ -1230,34 +1004,6 @@ pub fn langcall(tcx: TyCtxt, } } -/// Return the VariantDef corresponding to an inlined variant node -pub fn inlined_variant_def<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - inlined_vid: ast::NodeId) - -> ty::VariantDef<'tcx> -{ - let ctor_ty = ccx.tcx().node_id_to_type(inlined_vid); - debug!("inlined_variant_def: ctor_ty={:?} inlined_vid={:?}", ctor_ty, - inlined_vid); - let adt_def = match ctor_ty.sty { - ty::TyFnDef(_, _, &ty::BareFnTy { sig: ty::Binder(ty::FnSig { - output, .. - }), ..}) => output, - _ => ctor_ty - }.ty_adt_def().unwrap(); - let variant_def_id = if ccx.tcx().map.is_inlined_node_id(inlined_vid) { - ccx.defid_for_inlined_node(inlined_vid).unwrap() - } else { - ccx.tcx().map.local_def_id(inlined_vid) - }; - - adt_def.variants - .iter() - .find(|v| variant_def_id == v.did) - .unwrap_or_else(|| { - bug!("no variant for {:?}::{}", adt_def, inlined_vid) - }) -} - // To avoid UB from LLVM, these two functions mask RHS with an // appropriate mask unconditionally (i.e. the fallback behavior for // all shifts). 
For 32- and 64-bit types, this matches the semantics diff --git a/src/librustc_trans/consts.rs b/src/librustc_trans/consts.rs index 3ecba3691d..15f7132e52 100644 --- a/src/librustc_trans/consts.rs +++ b/src/librustc_trans/consts.rs @@ -11,98 +11,26 @@ use llvm; use llvm::{SetUnnamedAddr}; -use llvm::{InternalLinkage, ValueRef, Bool, True}; -use middle::const_qualif::ConstQualif; -use rustc_const_eval::{ConstEvalErr, lookup_const_fn_by_id, lookup_const_by_id, ErrKind}; -use rustc_const_eval::{eval_length, report_const_eval_err, note_const_eval_err}; -use rustc::hir::def::Def; +use llvm::{ValueRef, True}; +use rustc_const_eval::ConstEvalErr; use rustc::hir::def_id::DefId; use rustc::hir::map as hir_map; -use {abi, adt, closure, debuginfo, expr, machine}; +use {debuginfo, machine}; use base::{self, push_ctxt}; -use callee::Callee; use trans_item::TransItem; -use common::{type_is_sized, C_nil, const_get_elt}; -use common::{CrateContext, C_integral, C_floating, C_bool, C_str_slice, C_bytes, val_ty}; -use common::{C_struct, C_undef, const_to_opt_int, const_to_opt_uint, VariantInfo, C_uint}; -use common::{type_is_fat_ptr, Field, C_vector, C_array, C_null}; -use datum::{Datum, Lvalue}; +use common::{CrateContext, val_ty}; use declare; -use monomorphize::{self, Instance}; +use monomorphize::{Instance}; use type_::Type; use type_of; -use value::Value; -use Disr; -use rustc::ty::subst::Substs; -use rustc::ty::adjustment::{AdjustNeverToAny, AdjustDerefRef, AdjustReifyFnPointer}; -use rustc::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer}; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::cast::{CastTy,IntTy}; -use util::nodemap::NodeMap; -use rustc_const_math::{ConstInt, ConstUsize, ConstIsize}; +use rustc::ty; use rustc::hir; use std::ffi::{CStr, CString}; -use libc::c_uint; -use syntax::ast::{self, LitKind}; -use syntax::attr::{self, AttrMetaMethods}; +use syntax::ast; +use syntax::attr; use syntax::parse::token; -use syntax::ptr::P; -use syntax_pos::Span; - -pub type FnArgMap<'a> = Option<&'a NodeMap>; - -pub fn const_lit(cx: &CrateContext, e: &hir::Expr, lit: &ast::Lit) - -> ValueRef { - let _icx = push_ctxt("trans_lit"); - debug!("const_lit: {:?}", lit); - match lit.node { - LitKind::Byte(b) => C_integral(Type::uint_from_ty(cx, ast::UintTy::U8), b as u64, false), - LitKind::Char(i) => C_integral(Type::char(cx), i as u64, false), - LitKind::Int(i, ast::LitIntType::Signed(t)) => { - C_integral(Type::int_from_ty(cx, t), i, true) - } - LitKind::Int(u, ast::LitIntType::Unsigned(t)) => { - C_integral(Type::uint_from_ty(cx, t), u, false) - } - LitKind::Int(i, ast::LitIntType::Unsuffixed) => { - let lit_int_ty = cx.tcx().node_id_to_type(e.id); - match lit_int_ty.sty { - ty::TyInt(t) => { - C_integral(Type::int_from_ty(cx, t), i as u64, true) - } - ty::TyUint(t) => { - C_integral(Type::uint_from_ty(cx, t), i as u64, false) - } - _ => span_bug!(lit.span, - "integer literal has type {:?} (expected int \ - or usize)", - lit_int_ty) - } - } - LitKind::Float(ref fs, t) => { - C_floating(&fs, Type::float_from_ty(cx, t)) - } - LitKind::FloatUnsuffixed(ref fs) => { - let lit_float_ty = cx.tcx().node_id_to_type(e.id); - match lit_float_ty.sty { - ty::TyFloat(t) => { - C_floating(&fs, Type::float_from_ty(cx, t)) - } - _ => { - span_bug!(lit.span, - "floating point literal doesn't have the right type"); - } - } - } - LitKind::Bool(b) => C_bool(cx, b), - LitKind::Str(ref s, _) => C_str_slice(cx, (*s).clone()), - LitKind::ByteStr(ref data) => { - addr_of(cx, C_bytes(cx, &data[..]), 1, "byte_str") - 
} - } -} pub fn ptrcast(val: ValueRef, ty: Type) -> ValueRef { unsafe { @@ -125,7 +53,7 @@ pub fn addr_of_mut(ccx: &CrateContext, }); llvm::LLVMSetInitializer(gv, cv); llvm::LLVMSetAlignment(gv, align); - llvm::LLVMSetLinkage(gv, InternalLinkage); + llvm::LLVMRustSetLinkage(gv, llvm::Linkage::InternalLinkage); SetUnnamedAddr(gv, true); gv } @@ -154,868 +82,13 @@ pub fn addr_of(ccx: &CrateContext, gv } -/// Deref a constant pointer -pub fn load_const(cx: &CrateContext, v: ValueRef, t: Ty) -> ValueRef { - let v = match cx.const_unsized().borrow().get(&v) { - Some(&v) => v, - None => v - }; - let d = unsafe { llvm::LLVMGetInitializer(v) }; - if !d.is_null() && t.is_bool() { - unsafe { llvm::LLVMConstTrunc(d, Type::i1(cx).to_ref()) } - } else { - d - } -} - -fn const_deref<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - v: ValueRef, - ty: Ty<'tcx>) - -> (ValueRef, Ty<'tcx>) { - match ty.builtin_deref(true, ty::NoPreference) { - Some(mt) => { - if type_is_sized(cx.tcx(), mt.ty) { - (load_const(cx, v, mt.ty), mt.ty) - } else { - // Derefing a fat pointer does not change the representation, - // just the type to the unsized contents. - (v, mt.ty) - } - } - None => { - bug!("unexpected dereferenceable type {:?}", ty) - } - } -} - -fn const_fn_call<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - def_id: DefId, - substs: &'tcx Substs<'tcx>, - arg_vals: &[ValueRef], - param_substs: &'tcx Substs<'tcx>, - trueconst: TrueConst) -> Result { - let fn_like = lookup_const_fn_by_id(ccx.tcx(), def_id); - let fn_like = fn_like.expect("lookup_const_fn_by_id failed in const_fn_call"); - - let body = match fn_like.body().expr { - Some(ref expr) => expr, - None => return Ok(C_nil(ccx)) - }; - - let args = &fn_like.decl().inputs; - assert_eq!(args.len(), arg_vals.len()); - - let arg_ids = args.iter().map(|arg| arg.pat.id); - let fn_args = arg_ids.zip(arg_vals.iter().cloned()).collect(); - - let substs = ccx.tcx().mk_substs(substs.clone().erase_regions()); - let substs = monomorphize::apply_param_substs(ccx.tcx(), - param_substs, - &substs); - - const_expr(ccx, body, substs, Some(&fn_args), trueconst).map(|(res, _)| res) -} - -pub fn get_const_expr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - def_id: DefId, - ref_expr: &hir::Expr, - param_substs: &'tcx Substs<'tcx>) - -> &'tcx hir::Expr { - let substs = ccx.tcx().node_id_item_substs(ref_expr.id).substs; - let substs = ccx.tcx().mk_substs(substs.clone().erase_regions()); - let substs = monomorphize::apply_param_substs(ccx.tcx(), - param_substs, - &substs); - match lookup_const_by_id(ccx.tcx(), def_id, Some(substs)) { - Some((ref expr, _ty)) => expr, - None => { - span_bug!(ref_expr.span, "constant item not found") - } - } -} - -pub enum ConstEvalFailure { - /// in case the const evaluator failed on something that panic at runtime - /// as defined in RFC 1229 - Runtime(ConstEvalErr), - // in case we found a true constant - Compiletime(ConstEvalErr), -} - -impl ConstEvalFailure { - fn into_inner(self) -> ConstEvalErr { - match self { - Runtime(e) => e, - Compiletime(e) => e, - } - } - - pub fn as_inner(&self) -> &ConstEvalErr { - match self { - &Runtime(ref e) => e, - &Compiletime(ref e) => e, - } - } -} - -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub enum TrueConst { - Yes, No -} - -use self::ConstEvalFailure::*; - -fn get_const_val<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - def_id: DefId, - ref_expr: &hir::Expr, - param_substs: &'tcx Substs<'tcx>) - -> Result { - let expr = get_const_expr(ccx, def_id, ref_expr, param_substs); - let empty_substs = 
ccx.tcx().mk_substs(Substs::empty()); - match get_const_expr_as_global(ccx, expr, ConstQualif::empty(), empty_substs, TrueConst::Yes) { - Err(Runtime(err)) => { - report_const_eval_err(ccx.tcx(), &err, expr.span, "expression").emit(); - Err(Compiletime(err)) - }, - other => other, - } -} - -pub fn get_const_expr_as_global<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - expr: &hir::Expr, - qualif: ConstQualif, - param_substs: &'tcx Substs<'tcx>, - trueconst: TrueConst) - -> Result { - debug!("get_const_expr_as_global: {:?}", expr.id); - // Special-case constants to cache a common global for all uses. - if let hir::ExprPath(..) = expr.node { - // `def` must be its own statement and cannot be in the `match` - // otherwise the `def_map` will be borrowed for the entire match instead - // of just to get the `def` value - match ccx.tcx().expect_def(expr.id) { - Def::Const(def_id) | Def::AssociatedConst(def_id) => { - if !ccx.tcx().tables.borrow().adjustments.contains_key(&expr.id) { - debug!("get_const_expr_as_global ({:?}): found const {:?}", - expr.id, def_id); - return get_const_val(ccx, def_id, expr, param_substs); - } - }, - _ => {}, - } - } - - let key = (expr.id, param_substs); - if let Some(&val) = ccx.const_values().borrow().get(&key) { - return Ok(val); - } - let ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, - &ccx.tcx().expr_ty(expr)); - let val = if qualif.intersects(ConstQualif::NON_STATIC_BORROWS) { - // Avoid autorefs as they would create global instead of stack - // references, even when only the latter are correct. - const_expr_unadjusted(ccx, expr, ty, param_substs, None, trueconst)? - } else { - const_expr(ccx, expr, param_substs, None, trueconst)?.0 - }; - - // boolean SSA values are i1, but they have to be stored in i8 slots, - // otherwise some LLVM optimization passes don't work as expected - let val = unsafe { - if llvm::LLVMTypeOf(val) == Type::i1(ccx).to_ref() { - llvm::LLVMConstZExt(val, Type::i8(ccx).to_ref()) - } else { - val - } - }; - - let lvalue = addr_of(ccx, val, type_of::align_of(ccx, ty), "const"); - ccx.const_values().borrow_mut().insert(key, lvalue); - Ok(lvalue) -} - -pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - e: &hir::Expr, - param_substs: &'tcx Substs<'tcx>, - fn_args: FnArgMap, - trueconst: TrueConst) - -> Result<(ValueRef, Ty<'tcx>), ConstEvalFailure> { - let ety = monomorphize::apply_param_substs(cx.tcx(), param_substs, - &cx.tcx().expr_ty(e)); - let llconst = const_expr_unadjusted(cx, e, ety, param_substs, fn_args, trueconst)?; - let mut llconst = llconst; - let mut ety_adjusted = monomorphize::apply_param_substs(cx.tcx(), param_substs, - &cx.tcx().expr_ty_adjusted(e)); - let opt_adj = cx.tcx().tables.borrow().adjustments.get(&e.id).cloned(); - match opt_adj { - Some(AdjustNeverToAny(..)) => span_bug!(e.span, "const expression of type ! encountered"), - Some(AdjustReifyFnPointer) => { - match ety.sty { - ty::TyFnDef(def_id, substs, _) => { - llconst = Callee::def(cx, def_id, substs).reify(cx).val; - } - _ => { - bug!("{} cannot be reified to a fn ptr", ety) - } - } - } - Some(AdjustUnsafeFnPointer) | Some(AdjustMutToConstPointer) => { - // purely a type-level thing - } - Some(AdjustDerefRef(adj)) => { - let mut ty = ety; - // Save the last autoderef in case we can avoid it. 
- if adj.autoderefs > 0 { - for _ in 0..adj.autoderefs-1 { - let (dv, dt) = const_deref(cx, llconst, ty); - llconst = dv; - ty = dt; - } - } - - if adj.autoref.is_some() { - if adj.autoderefs == 0 { - // Don't copy data to do a deref+ref - // (i.e., skip the last auto-deref). - llconst = addr_of(cx, llconst, type_of::align_of(cx, ty), "autoref"); - ty = cx.tcx().mk_imm_ref(cx.tcx().mk_region(ty::ReErased), ty); - } - } else if adj.autoderefs > 0 { - let (dv, dt) = const_deref(cx, llconst, ty); - llconst = dv; - - // If we derefed a fat pointer then we will have an - // open type here. So we need to update the type with - // the one returned from const_deref. - ety_adjusted = dt; - } - - if let Some(target) = adj.unsize { - let target = monomorphize::apply_param_substs(cx.tcx(), - param_substs, - &target); - - let pointee_ty = ty.builtin_deref(true, ty::NoPreference) - .expect("consts: unsizing got non-pointer type").ty; - let (base, old_info) = if !type_is_sized(cx.tcx(), pointee_ty) { - // Normally, the source is a thin pointer and we are - // adding extra info to make a fat pointer. The exception - // is when we are upcasting an existing object fat pointer - // to use a different vtable. In that case, we want to - // load out the original data pointer so we can repackage - // it. - (const_get_elt(llconst, &[abi::FAT_PTR_ADDR as u32]), - Some(const_get_elt(llconst, &[abi::FAT_PTR_EXTRA as u32]))) - } else { - (llconst, None) - }; - - let unsized_ty = target.builtin_deref(true, ty::NoPreference) - .expect("consts: unsizing got non-pointer target type").ty; - let ptr_ty = type_of::in_memory_type_of(cx, unsized_ty).ptr_to(); - let base = ptrcast(base, ptr_ty); - let info = base::unsized_info(cx, pointee_ty, unsized_ty, old_info); - - if old_info.is_none() { - let prev_const = cx.const_unsized().borrow_mut() - .insert(base, llconst); - assert!(prev_const.is_none() || prev_const == Some(llconst)); - } - assert_eq!(abi::FAT_PTR_ADDR, 0); - assert_eq!(abi::FAT_PTR_EXTRA, 1); - llconst = C_struct(cx, &[base, info], false); - } - } - None => {} - }; - - let llty = type_of::sizing_type_of(cx, ety_adjusted); - let csize = machine::llsize_of_alloc(cx, val_ty(llconst)); - let tsize = machine::llsize_of_alloc(cx, llty); - if csize != tsize { - cx.sess().abort_if_errors(); - unsafe { - // FIXME these values could use some context - llvm::LLVMDumpValue(llconst); - llvm::LLVMDumpValue(C_undef(llty)); - } - bug!("const {:?} of type {:?} has size {} instead of {}", - e, ety_adjusted, - csize, tsize); - } - Ok((llconst, ety_adjusted)) -} - -fn check_unary_expr_validity(cx: &CrateContext, e: &hir::Expr, t: Ty, - te: ValueRef, trueconst: TrueConst) -> Result<(), ConstEvalFailure> { - // The only kind of unary expression that we check for validity - // here is `-expr`, to check if it "overflows" (e.g. `-i32::MIN`). - if let hir::ExprUnary(hir::UnNeg, ref inner_e) = e.node { - - // An unfortunate special case: we parse e.g. -128 as a - // negation of the literal 128, which means if we're expecting - // a i8 (or if it was already suffixed, e.g. `-128_i8`), then - // 128 will have already overflowed to -128, and so then the - // constant evaluator thinks we're trying to negate -128. - // - // Catch this up front by looking for ExprLit directly, - // and just accepting it. 
- if let hir::ExprLit(_) = inner_e.node { return Ok(()); } - let cval = match to_const_int(te, t, cx.tcx()) { - Some(v) => v, - None => return Ok(()), - }; - const_err(cx, e.span, (-cval).map_err(ErrKind::Math), trueconst)?; - } - Ok(()) -} - -pub fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option { - match t.sty { - ty::TyInt(int_type) => const_to_opt_int(value).and_then(|input| match int_type { - ast::IntTy::I8 => { - assert_eq!(input as i8 as i64, input); - Some(ConstInt::I8(input as i8)) - }, - ast::IntTy::I16 => { - assert_eq!(input as i16 as i64, input); - Some(ConstInt::I16(input as i16)) - }, - ast::IntTy::I32 => { - assert_eq!(input as i32 as i64, input); - Some(ConstInt::I32(input as i32)) - }, - ast::IntTy::I64 => { - Some(ConstInt::I64(input)) - }, - ast::IntTy::Is => { - ConstIsize::new(input, tcx.sess.target.int_type) - .ok().map(ConstInt::Isize) - }, - }), - ty::TyUint(uint_type) => const_to_opt_uint(value).and_then(|input| match uint_type { - ast::UintTy::U8 => { - assert_eq!(input as u8 as u64, input); - Some(ConstInt::U8(input as u8)) - }, - ast::UintTy::U16 => { - assert_eq!(input as u16 as u64, input); - Some(ConstInt::U16(input as u16)) - }, - ast::UintTy::U32 => { - assert_eq!(input as u32 as u64, input); - Some(ConstInt::U32(input as u32)) - }, - ast::UintTy::U64 => { - Some(ConstInt::U64(input)) - }, - ast::UintTy::Us => { - ConstUsize::new(input, tcx.sess.target.uint_type) - .ok().map(ConstInt::Usize) - }, - }), - _ => None, - } -} - -pub fn const_err(cx: &CrateContext, - span: Span, - result: Result, - trueconst: TrueConst) - -> Result { - match (result, trueconst) { - (Ok(x), _) => Ok(x), - (Err(err), TrueConst::Yes) => { - let err = ConstEvalErr{ span: span, kind: err }; - report_const_eval_err(cx.tcx(), &err, span, "expression").emit(); - Err(Compiletime(err)) - }, - (Err(err), TrueConst::No) => { - let err = ConstEvalErr{ span: span, kind: err }; - let mut diag = cx.tcx().sess.struct_span_warn( - span, "this expression will panic at run-time"); - note_const_eval_err(cx.tcx(), &err, span, "expression", &mut diag); - diag.emit(); - Err(Runtime(err)) - }, - } -} - -fn check_binary_expr_validity(cx: &CrateContext, e: &hir::Expr, t: Ty, - te1: ValueRef, te2: ValueRef, - trueconst: TrueConst) -> Result<(), ConstEvalFailure> { - let b = if let hir::ExprBinary(b, _, _) = e.node { b } else { bug!() }; - let (lhs, rhs) = match (to_const_int(te1, t, cx.tcx()), to_const_int(te2, t, cx.tcx())) { - (Some(v1), Some(v2)) => (v1, v2), - _ => return Ok(()), - }; - let result = match b.node { - hir::BiAdd => lhs + rhs, - hir::BiSub => lhs - rhs, - hir::BiMul => lhs * rhs, - hir::BiDiv => lhs / rhs, - hir::BiRem => lhs % rhs, - hir::BiShl => lhs << rhs, - hir::BiShr => lhs >> rhs, - _ => return Ok(()), - }; - const_err(cx, e.span, result.map_err(ErrKind::Math), trueconst)?; - Ok(()) -} - -fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - e: &hir::Expr, - ety: Ty<'tcx>, - param_substs: &'tcx Substs<'tcx>, - fn_args: FnArgMap, - trueconst: TrueConst) - -> Result -{ - debug!("const_expr_unadjusted(e={:?}, ety={:?}, param_substs={:?})", - e, - ety, - param_substs); - - let map_list = |exprs: &[P]| -> Result, ConstEvalFailure> { - exprs.iter() - .map(|e| const_expr(cx, &e, param_substs, fn_args, trueconst).map(|(l, _)| l)) - .collect::>>() - .into_iter() - .collect() - // this dance is necessary to eagerly run const_expr so all errors are reported - }; - let _icx = push_ctxt("const_expr"); - Ok(match e.node { - hir::ExprLit(ref lit) => const_lit(cx, e, 
&lit), - hir::ExprBinary(b, ref e1, ref e2) => { - /* Neither type is bottom, and we expect them to be unified - * already, so the following is safe. */ - let (te1, ty) = const_expr(cx, &e1, param_substs, fn_args, trueconst)?; - debug!("const_expr_unadjusted: te1={:?}, ty={:?}", - Value(te1), ty); - assert!(!ty.is_simd()); - let is_float = ty.is_fp(); - let signed = ty.is_signed(); - - let (te2, ty2) = const_expr(cx, &e2, param_substs, fn_args, trueconst)?; - debug!("const_expr_unadjusted: te2={:?}, ty={:?}", - Value(te2), ty2); - - check_binary_expr_validity(cx, e, ty, te1, te2, trueconst)?; - - unsafe { match b.node { - hir::BiAdd if is_float => llvm::LLVMConstFAdd(te1, te2), - hir::BiAdd => llvm::LLVMConstAdd(te1, te2), - - hir::BiSub if is_float => llvm::LLVMConstFSub(te1, te2), - hir::BiSub => llvm::LLVMConstSub(te1, te2), - - hir::BiMul if is_float => llvm::LLVMConstFMul(te1, te2), - hir::BiMul => llvm::LLVMConstMul(te1, te2), - - hir::BiDiv if is_float => llvm::LLVMConstFDiv(te1, te2), - hir::BiDiv if signed => llvm::LLVMConstSDiv(te1, te2), - hir::BiDiv => llvm::LLVMConstUDiv(te1, te2), - - hir::BiRem if is_float => llvm::LLVMConstFRem(te1, te2), - hir::BiRem if signed => llvm::LLVMConstSRem(te1, te2), - hir::BiRem => llvm::LLVMConstURem(te1, te2), - - hir::BiAnd => llvm::LLVMConstAnd(te1, te2), - hir::BiOr => llvm::LLVMConstOr(te1, te2), - hir::BiBitXor => llvm::LLVMConstXor(te1, te2), - hir::BiBitAnd => llvm::LLVMConstAnd(te1, te2), - hir::BiBitOr => llvm::LLVMConstOr(te1, te2), - hir::BiShl => { - let te2 = base::cast_shift_const_rhs(b.node, te1, te2); - llvm::LLVMConstShl(te1, te2) - }, - hir::BiShr => { - let te2 = base::cast_shift_const_rhs(b.node, te1, te2); - if signed { llvm::LLVMConstAShr(te1, te2) } - else { llvm::LLVMConstLShr(te1, te2) } - }, - hir::BiEq | hir::BiNe | hir::BiLt | hir::BiLe | hir::BiGt | hir::BiGe => { - if is_float { - let cmp = base::bin_op_to_fcmp_predicate(b.node); - llvm::LLVMConstFCmp(cmp, te1, te2) - } else { - let cmp = base::bin_op_to_icmp_predicate(b.node, signed); - llvm::LLVMConstICmp(cmp, te1, te2) - } - }, - } } // unsafe { match b.node { - }, - hir::ExprUnary(u, ref inner_e) => { - let (te, ty) = const_expr(cx, &inner_e, param_substs, fn_args, trueconst)?; - - check_unary_expr_validity(cx, e, ty, te, trueconst)?; - - let is_float = ty.is_fp(); - unsafe { match u { - hir::UnDeref => const_deref(cx, te, ty).0, - hir::UnNot => llvm::LLVMConstNot(te), - hir::UnNeg if is_float => llvm::LLVMConstFNeg(te), - hir::UnNeg => llvm::LLVMConstNeg(te), - } } - }, - hir::ExprField(ref base, field) => { - let (bv, bt) = const_expr(cx, &base, param_substs, fn_args, trueconst)?; - let brepr = adt::represent_type(cx, bt); - let vinfo = VariantInfo::from_ty(cx.tcx(), bt, None); - let ix = vinfo.field_index(field.node); - adt::const_get_field(&brepr, bv, vinfo.discr, ix) - }, - hir::ExprTupField(ref base, idx) => { - let (bv, bt) = const_expr(cx, &base, param_substs, fn_args, trueconst)?; - let brepr = adt::represent_type(cx, bt); - let vinfo = VariantInfo::from_ty(cx.tcx(), bt, None); - adt::const_get_field(&brepr, bv, vinfo.discr, idx.node) - }, - hir::ExprIndex(ref base, ref index) => { - let (bv, bt) = const_expr(cx, &base, param_substs, fn_args, trueconst)?; - let iv = const_expr(cx, &index, param_substs, fn_args, TrueConst::Yes)?.0; - let iv = if let Some(iv) = const_to_opt_uint(iv) { - iv - } else { - span_bug!(index.span, "index is not an integer-constant expression"); - }; - let (arr, len) = match bt.sty { - ty::TyArray(_, u) => (bv, C_uint(cx, u)), 
- ty::TySlice(..) | ty::TyStr => { - let e1 = const_get_elt(bv, &[0]); - (load_const(cx, e1, bt), const_get_elt(bv, &[1])) - }, - ty::TyRef(_, mt) => match mt.ty.sty { - ty::TyArray(_, u) => { - (load_const(cx, bv, mt.ty), C_uint(cx, u)) - }, - _ => span_bug!(base.span, - "index-expr base must be a vector \ - or string type, found {:?}", - bt), - }, - _ => span_bug!(base.span, - "index-expr base must be a vector \ - or string type, found {:?}", - bt), - }; - - let len = unsafe { llvm::LLVMConstIntGetZExtValue(len) as u64 }; - let len = match bt.sty { - ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => match ty.sty { - ty::TyStr => { - assert!(len > 0); - len - 1 - }, - _ => len, - }, - _ => len, - }; - if iv >= len { - // FIXME #3170: report this earlier on in the const-eval - // pass. Reporting here is a bit late. - const_err(cx, e.span, Err(ErrKind::IndexOutOfBounds { - len: len, - index: iv - }), trueconst)?; - C_undef(val_ty(arr).element_type()) - } else { - const_get_elt(arr, &[iv as c_uint]) - } - }, - hir::ExprCast(ref base, _) => { - let t_cast = ety; - let llty = type_of::type_of(cx, t_cast); - let (v, t_expr) = const_expr(cx, &base, param_substs, fn_args, trueconst)?; - debug!("trans_const_cast({:?} as {:?})", t_expr, t_cast); - if expr::cast_is_noop(cx.tcx(), base, t_expr, t_cast) { - return Ok(v); - } - if type_is_fat_ptr(cx.tcx(), t_expr) { - // Fat pointer casts. - let t_cast_inner = - t_cast.builtin_deref(true, ty::NoPreference).expect("cast to non-pointer").ty; - let ptr_ty = type_of::in_memory_type_of(cx, t_cast_inner).ptr_to(); - let addr = ptrcast(const_get_elt(v, &[abi::FAT_PTR_ADDR as u32]), - ptr_ty); - if type_is_fat_ptr(cx.tcx(), t_cast) { - let info = const_get_elt(v, &[abi::FAT_PTR_EXTRA as u32]); - return Ok(C_struct(cx, &[addr, info], false)) - } else { - return Ok(addr); - } - } - unsafe { match ( - CastTy::from_ty(t_expr).expect("bad input type for cast"), - CastTy::from_ty(t_cast).expect("bad output type for cast"), - ) { - (CastTy::Int(IntTy::CEnum), CastTy::Int(_)) => { - let repr = adt::represent_type(cx, t_expr); - let discr = adt::const_get_discrim(&repr, v); - let iv = C_integral(cx.int_type(), discr.0, false); - let s = adt::is_discr_signed(&repr) as Bool; - llvm::LLVMConstIntCast(iv, llty.to_ref(), s) - }, - (CastTy::Int(_), CastTy::Int(_)) => { - let s = t_expr.is_signed() as Bool; - llvm::LLVMConstIntCast(v, llty.to_ref(), s) - }, - (CastTy::Int(_), CastTy::Float) => { - if t_expr.is_signed() { - llvm::LLVMConstSIToFP(v, llty.to_ref()) - } else { - llvm::LLVMConstUIToFP(v, llty.to_ref()) - } - }, - (CastTy::Float, CastTy::Float) => llvm::LLVMConstFPCast(v, llty.to_ref()), - (CastTy::Float, CastTy::Int(IntTy::I)) => llvm::LLVMConstFPToSI(v, llty.to_ref()), - (CastTy::Float, CastTy::Int(_)) => llvm::LLVMConstFPToUI(v, llty.to_ref()), - (CastTy::Ptr(_), CastTy::Ptr(_)) | (CastTy::FnPtr, CastTy::Ptr(_)) - | (CastTy::RPtr(_), CastTy::Ptr(_)) => { - ptrcast(v, llty) - }, - (CastTy::FnPtr, CastTy::FnPtr) => ptrcast(v, llty), // isn't this a coercion? - (CastTy::Int(_), CastTy::Ptr(_)) => llvm::LLVMConstIntToPtr(v, llty.to_ref()), - (CastTy::Ptr(_), CastTy::Int(_)) | (CastTy::FnPtr, CastTy::Int(_)) => { - llvm::LLVMConstPtrToInt(v, llty.to_ref()) - }, - _ => { - span_bug!(e.span, "bad combination of types for cast") - }, - } } // unsafe { match ( ... 
) { - }, - hir::ExprAddrOf(hir::MutImmutable, ref sub) => { - // If this is the address of some static, then we need to return - // the actual address of the static itself (short circuit the rest - // of const eval). - let mut cur = sub; - loop { - match cur.node { - hir::ExprBlock(ref blk) => { - if let Some(ref sub) = blk.expr { - cur = sub; - } else { - break; - } - }, - _ => break, - } - } - if let Some(Def::Static(def_id, _)) = cx.tcx().expect_def_or_none(cur.id) { - get_static(cx, def_id).val - } else { - // If this isn't the address of a static, then keep going through - // normal constant evaluation. - let (v, ty) = const_expr(cx, &sub, param_substs, fn_args, trueconst)?; - addr_of(cx, v, type_of::align_of(cx, ty), "ref") - } - }, - hir::ExprAddrOf(hir::MutMutable, ref sub) => { - let (v, ty) = const_expr(cx, &sub, param_substs, fn_args, trueconst)?; - addr_of_mut(cx, v, type_of::align_of(cx, ty), "ref_mut_slice") - }, - hir::ExprTup(ref es) => { - let repr = adt::represent_type(cx, ety); - let vals = map_list(&es[..])?; - adt::trans_const(cx, &repr, Disr(0), &vals[..]) - }, - hir::ExprStruct(_, ref fs, ref base_opt) => { - let repr = adt::represent_type(cx, ety); - - let base_val = match *base_opt { - Some(ref base) => Some(const_expr( - cx, - &base, - param_substs, - fn_args, - trueconst, - )?), - None => None - }; - - let VariantInfo { discr, fields } = VariantInfo::of_node(cx.tcx(), ety, e.id); - let cs = fields.iter().enumerate().map(|(ix, &Field(f_name, _))| { - match (fs.iter().find(|f| f_name == f.name.node), base_val) { - (Some(ref f), _) => { - const_expr(cx, &f.expr, param_substs, fn_args, trueconst).map(|(l, _)| l) - }, - (_, Some((bv, _))) => Ok(adt::const_get_field(&repr, bv, discr, ix)), - (_, None) => span_bug!(e.span, "missing struct field"), - } - }) - .collect::>>() - .into_iter() - .collect::,ConstEvalFailure>>(); - let cs = cs?; - if ety.is_simd() { - C_vector(&cs[..]) - } else { - adt::trans_const(cx, &repr, discr, &cs[..]) - } - }, - hir::ExprVec(ref es) => { - let unit_ty = ety.sequence_element_type(cx.tcx()); - let llunitty = type_of::type_of(cx, unit_ty); - let vs = es.iter() - .map(|e| const_expr( - cx, - &e, - param_substs, - fn_args, - trueconst, - ).map(|(l, _)| l)) - .collect::>>() - .into_iter() - .collect::, ConstEvalFailure>>(); - let vs = vs?; - // If the vector contains enums, an LLVM array won't work. - if vs.iter().any(|vi| val_ty(*vi) != llunitty) { - C_struct(cx, &vs[..], false) - } else { - C_array(llunitty, &vs[..]) - } - }, - hir::ExprRepeat(ref elem, ref count) => { - let unit_ty = ety.sequence_element_type(cx.tcx()); - let llunitty = type_of::type_of(cx, unit_ty); - let n = eval_length(cx.tcx(), count, "repeat count").unwrap(); - let unit_val = const_expr(cx, &elem, param_substs, fn_args, trueconst)?.0; - let vs = vec![unit_val; n]; - if val_ty(unit_val) != llunitty { - C_struct(cx, &vs[..], false) - } else { - C_array(llunitty, &vs[..]) - } - }, - hir::ExprPath(..) => { - match cx.tcx().expect_def(e.id) { - Def::Local(_, id) => { - if let Some(val) = fn_args.and_then(|args| args.get(&id).cloned()) { - val - } else { - span_bug!(e.span, "const fn argument not found") - } - } - Def::Fn(..) | Def::Method(..) 
=> C_nil(cx), - Def::Const(def_id) | Def::AssociatedConst(def_id) => { - load_const(cx, get_const_val(cx, def_id, e, param_substs)?, - ety) - } - Def::Variant(enum_did, variant_did) => { - let vinfo = cx.tcx().lookup_adt_def(enum_did).variant_with_id(variant_did); - match vinfo.kind { - ty::VariantKind::Unit => { - let repr = adt::represent_type(cx, ety); - adt::trans_const(cx, &repr, Disr::from(vinfo.disr_val), &[]) - } - ty::VariantKind::Tuple => C_nil(cx), - ty::VariantKind::Struct => { - span_bug!(e.span, "path-expr refers to a dict variant!") - } - } - } - // Unit struct or ctor. - Def::Struct(..) => C_null(type_of::type_of(cx, ety)), - _ => { - span_bug!(e.span, "expected a const, fn, struct, \ - or variant def") - } - } - }, - hir::ExprCall(ref callee, ref args) => { - let mut callee = &**callee; - loop { - callee = match callee.node { - hir::ExprBlock(ref block) => match block.expr { - Some(ref tail) => &tail, - None => break, - }, - _ => break, - }; - } - let arg_vals = map_list(args)?; - match cx.tcx().expect_def(callee.id) { - Def::Fn(did) | Def::Method(did) => { - const_fn_call( - cx, - did, - cx.tcx().node_id_item_substs(callee.id).substs, - &arg_vals, - param_substs, - trueconst, - )? - } - Def::Struct(..) => { - if ety.is_simd() { - C_vector(&arg_vals[..]) - } else { - let repr = adt::represent_type(cx, ety); - adt::trans_const(cx, &repr, Disr(0), &arg_vals[..]) - } - } - Def::Variant(enum_did, variant_did) => { - let repr = adt::represent_type(cx, ety); - let vinfo = cx.tcx().lookup_adt_def(enum_did).variant_with_id(variant_did); - adt::trans_const(cx, - &repr, - Disr::from(vinfo.disr_val), - &arg_vals[..]) - } - _ => span_bug!(e.span, "expected a struct, variant, or const fn def"), - } - }, - hir::ExprMethodCall(_, _, ref args) => { - let arg_vals = map_list(args)?; - let method_call = ty::MethodCall::expr(e.id); - let method = cx.tcx().tables.borrow().method_map[&method_call]; - const_fn_call(cx, method.def_id, method.substs, - &arg_vals, param_substs, trueconst)? 
- }, - hir::ExprType(ref e, _) => const_expr(cx, &e, param_substs, fn_args, trueconst)?.0, - hir::ExprBlock(ref block) => { - match block.expr { - Some(ref expr) => const_expr( - cx, - &expr, - param_substs, - fn_args, - trueconst, - )?.0, - None => C_nil(cx), - } - }, - hir::ExprClosure(_, ref decl, ref body, _) => { - match ety.sty { - ty::TyClosure(def_id, substs) => { - closure::trans_closure_expr(closure::Dest::Ignore(cx), - decl, - body, - e.id, - def_id, - substs); - } - _ => - span_bug!( - e.span, - "bad type for closure expr: {:?}", ety) - } - C_null(type_of::type_of(cx, ety)) - }, - _ => span_bug!(e.span, - "bad constant expression type in consts::const_expr"), - }) -} - -pub fn get_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId) - -> Datum<'tcx, Lvalue> { - let ty = ccx.tcx().lookup_item_type(def_id).ty; - +pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { let instance = Instance::mono(ccx.shared(), def_id); if let Some(&g) = ccx.instances().borrow().get(&instance) { - return Datum::new(g, ty, Lvalue::new("static")); + return g; } + let ty = ccx.tcx().lookup_item_type(def_id).ty; let g = if let Some(id) = ccx.tcx().map.as_local_node_id(def_id) { let llty = type_of::type_of(ccx, ty); @@ -1032,14 +105,10 @@ pub fn get_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId) let defined_in_current_codegen_unit = ccx.codegen_unit() .items() .contains_key(&TransItem::Static(id)); - if defined_in_current_codegen_unit { - if declare::get_declared_value(ccx, sym).is_none() { - span_bug!(span, "trans: Static not properly pre-defined?"); - } - } else { - if declare::get_declared_value(ccx, sym).is_some() { - span_bug!(span, "trans: Conflicting symbol names for static?"); - } + assert!(!defined_in_current_codegen_unit); + + if declare::get_declared_value(ccx, sym).is_some() { + span_bug!(span, "trans: Conflicting symbol names for static?"); } let g = declare::define_global(ccx, sym, llty).unwrap(); @@ -1073,7 +142,7 @@ pub fn get_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId) unsafe { // Declare a symbol `foo` with the desired linkage. let g1 = declare::declare_global(ccx, &sym, llty2); - llvm::LLVMSetLinkage(g1, linkage); + llvm::LLVMRustSetLinkage(g1, linkage); // Declare an internal global `extern_with_linkage_foo` which // is initialized with the address of `foo`. 
If `foo` is @@ -1087,7 +156,7 @@ pub fn get_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId) ccx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym)) }); - llvm::LLVMSetLinkage(g2, llvm::InternalLinkage); + llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage); llvm::LLVMSetInitializer(g2, g1); g2 } @@ -1136,34 +205,20 @@ pub fn get_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId) ccx.instances().borrow_mut().insert(instance, g); ccx.statics().borrow_mut().insert(g, def_id); - Datum::new(g, ty, Lvalue::new("static")) + g } pub fn trans_static(ccx: &CrateContext, m: hir::Mutability, - expr: &hir::Expr, id: ast::NodeId, attrs: &[ast::Attribute]) -> Result { unsafe { let _icx = push_ctxt("trans_static"); let def_id = ccx.tcx().map.local_def_id(id); - let datum = get_static(ccx, def_id); + let g = get_static(ccx, def_id); - let check_attrs = |attrs: &[ast::Attribute]| { - let default_to_mir = ccx.sess().opts.debugging_opts.orbit; - let invert = if default_to_mir { "rustc_no_mir" } else { "rustc_mir" }; - default_to_mir ^ attrs.iter().any(|item| item.check_name(invert)) - }; - let use_mir = check_attrs(ccx.tcx().map.attrs(id)); - - let v = if use_mir { - ::mir::trans_static_initializer(ccx, def_id) - } else { - let empty_substs = ccx.tcx().mk_substs(Substs::empty()); - const_expr(ccx, expr, empty_substs, None, TrueConst::Yes) - .map(|(v, _)| v) - }.map_err(|e| e.into_inner())?; + let v = ::mir::trans_static_initializer(ccx, def_id)?; // boolean SSA values are i1, but they have to be stored in i8 slots, // otherwise some LLVM optimization passes don't work as expected @@ -1175,31 +230,32 @@ pub fn trans_static(ccx: &CrateContext, v }; - let llty = type_of::type_of(ccx, datum.ty); + let ty = ccx.tcx().lookup_item_type(def_id).ty; + let llty = type_of::type_of(ccx, ty); let g = if val_llty == llty { - datum.val + g } else { // If we created the global with the wrong type, // correct the type. let empty_string = CString::new("").unwrap(); - let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(datum.val)); + let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g)); let name_string = CString::new(name_str_ref.to_bytes()).unwrap(); - llvm::LLVMSetValueName(datum.val, empty_string.as_ptr()); + llvm::LLVMSetValueName(g, empty_string.as_ptr()); let new_g = llvm::LLVMRustGetOrInsertGlobal( ccx.llmod(), name_string.as_ptr(), val_llty.to_ref()); // To avoid breaking any invariants, we leave around the old // global for the moment; we'll replace all references to it // with the new global later. (See base::trans_crate.) - ccx.statics_to_rauw().borrow_mut().push((datum.val, new_g)); + ccx.statics_to_rauw().borrow_mut().push((g, new_g)); new_g }; - llvm::LLVMSetAlignment(g, type_of::align_of(ccx, datum.ty)); + llvm::LLVMSetAlignment(g, type_of::align_of(ccx, ty)); llvm::LLVMSetInitializer(g, v); // As an optimization, all shared statics which do not have interior // mutability are placed into read-only memory. 
if m != hir::MutMutable { - let tcontents = datum.ty.type_contents(ccx.tcx()); + let tcontents = ty.type_contents(ccx.tcx()); if !tcontents.interior_unsafe() { llvm::LLVMSetGlobalConstant(g, llvm::True); } diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 65eea1bbb6..1b67516a9e 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -17,7 +17,6 @@ use rustc::hir::def_id::DefId; use rustc::traits; use rustc::mir::mir_map::MirMap; use rustc::mir::repr as mir; -use adt; use base; use builder::Builder; use common::BuilderRef_res; @@ -30,7 +29,7 @@ use monomorphize::Instance; use partitioning::CodegenUnit; use trans_item::TransItem; use type_::{Type, TypeNames}; -use rustc::ty::subst::{Substs, VecPerParamSpace}; +use rustc::ty::subst::Substs; use rustc::ty::{self, Ty, TyCtxt}; use session::config::NoDebugInfo; use session::Session; @@ -53,9 +52,7 @@ pub struct Stats { pub n_glues_created: Cell, pub n_null_glues: Cell, pub n_real_glues: Cell, - pub n_fallback_instantiations: Cell, pub n_fns: Cell, - pub n_monos: Cell, pub n_inlines: Cell, pub n_closures: Cell, pub n_llvm_insns: Cell, @@ -79,7 +76,6 @@ pub struct SharedCrateContext<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, stats: Stats, check_overflow: bool, - check_drop_flag_for_sanity: bool, mir_map: &'a MirMap<'tcx>, mir_cache: RefCell>>, @@ -87,6 +83,7 @@ pub struct SharedCrateContext<'a, 'tcx: 'a> { translation_items: RefCell>>, trait_cache: RefCell>>, + project_cache: RefCell>>, } /// The local portion of a `CrateContext`. There is one `LocalCrateContext` @@ -104,7 +101,6 @@ pub struct LocalCrateContext<'tcx> { drop_glues: RefCell, (ValueRef, FnType)>>, /// Cache instances of monomorphic and polymorphic items instances: RefCell, ValueRef>>, - monomorphizing: RefCell>, /// Cache generated vtables vtables: RefCell, ValueRef>>, /// Cache of constant strings, @@ -145,7 +141,6 @@ pub struct LocalCrateContext<'tcx> { lltypes: RefCell, Type>>, llsizingtypes: RefCell, Type>>, - adt_reprs: RefCell, Rc>>>, type_hashcodes: RefCell, String>>, int_type: Type, opaque_vec_type: Type, @@ -199,6 +194,42 @@ impl<'tcx> DepTrackingMapConfig for MirCache<'tcx> { } } +// # Global Cache + +pub struct ProjectionCache<'gcx> { + data: PhantomData<&'gcx ()> +} + +impl<'gcx> DepTrackingMapConfig for ProjectionCache<'gcx> { + type Key = Ty<'gcx>; + type Value = Ty<'gcx>; + fn to_dep_node(key: &Self::Key) -> DepNode { + // Ideally, we'd just put `key` into the dep-node, but we + // can't put full types in there. So just collect up all the + // def-ids of structs/enums as well as any traits that we + // project out of. It doesn't matter so much what we do here, + // except that if we are too coarse, we'll create overly + // coarse edges between impls and the trans. For example, if + // we just used the def-id of things we are projecting out of, + // then the key for `::T` and `::T` would both share a dep-node + // (`TraitSelect(SomeTrait)`), and hence the impls for both + // `Foo` and `Bar` would be considered inputs. So a change to + // `Bar` would affect things that just normalized `Foo`. + // Anyway, this heuristic is not ideal, but better than + // nothing. + let def_ids: Vec = + key.walk() + .filter_map(|t| match t.sty { + ty::TyAdt(adt_def, _) => Some(adt_def.did), + ty::TyProjection(ref proj) => Some(proj.trait_ref.def_id), + _ => None, + }) + .collect(); + DepNode::TraitSelect(def_ids) + } +} + /// This list owns a number of LocalCrateContexts and binds them to their common /// SharedCrateContext. 
This type just exists as a convenience, something to /// pass around all LocalCrateContexts with and get an iterator over them. @@ -424,8 +455,7 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { symbol_hasher: Sha256, link_meta: LinkMeta, reachable: NodeSet, - check_overflow: bool, - check_drop_flag_for_sanity: bool) + check_overflow: bool) -> SharedCrateContext<'b, 'tcx> { let (metadata_llcx, metadata_llmod) = unsafe { create_context_and_module(&tcx.sess, "metadata") @@ -490,9 +520,7 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { n_glues_created: Cell::new(0), n_null_glues: Cell::new(0), n_real_glues: Cell::new(0), - n_fallback_instantiations: Cell::new(0), n_fns: Cell::new(0), - n_monos: Cell::new(0), n_inlines: Cell::new(0), n_closures: Cell::new(0), n_llvm_insns: Cell::new(0), @@ -500,10 +528,10 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { fn_stats: RefCell::new(Vec::new()), }, check_overflow: check_overflow, - check_drop_flag_for_sanity: check_drop_flag_for_sanity, use_dll_storage_attrs: use_dll_storage_attrs, translation_items: RefCell::new(FnvHashSet()), trait_cache: RefCell::new(DepTrackingMap::new(tcx.dep_graph.clone())), + project_cache: RefCell::new(DepTrackingMap::new(tcx.dep_graph.clone())), } } @@ -527,6 +555,10 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { &self.trait_cache } + pub fn project_cache(&self) -> &RefCell>> { + &self.project_cache + } + pub fn link_meta<'a>(&'a self) -> &'a LinkMeta { &self.link_meta } @@ -571,16 +603,11 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { /// Given the def-id of some item that has no type parameters, make /// a suitable "empty substs" for it. pub fn empty_substs_for_def_id(&self, item_def_id: DefId) -> &'tcx Substs<'tcx> { - let scheme = self.tcx().lookup_item_type(item_def_id); - self.empty_substs_for_scheme(&scheme) - } - - pub fn empty_substs_for_scheme(&self, scheme: &ty::TypeScheme<'tcx>) - -> &'tcx Substs<'tcx> { - assert!(scheme.generics.types.is_empty()); - self.tcx().mk_substs( - Substs::new(VecPerParamSpace::empty(), - scheme.generics.regions.map(|_| ty::ReErased))) + Substs::for_item(self.tcx(), item_def_id, + |_, _| self.tcx().mk_region(ty::ReErased), + |_, _| { + bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id) + }) } pub fn symbol_hasher(&self) -> &RefCell { @@ -636,7 +663,6 @@ impl<'tcx> LocalCrateContext<'tcx> { fn_pointer_shims: RefCell::new(FnvHashMap()), drop_glues: RefCell::new(FnvHashMap()), instances: RefCell::new(FnvHashMap()), - monomorphizing: RefCell::new(DefIdMap()), vtables: RefCell::new(FnvHashMap()), const_cstr_cache: RefCell::new(FnvHashMap()), const_unsized: RefCell::new(FnvHashMap()), @@ -649,7 +675,6 @@ impl<'tcx> LocalCrateContext<'tcx> { statics_to_rauw: RefCell::new(Vec::new()), lltypes: RefCell::new(FnvHashMap()), llsizingtypes: RefCell::new(FnvHashMap()), - adt_reprs: RefCell::new(FnvHashMap()), type_hashcodes: RefCell::new(FnvHashMap()), int_type: Type::from_ref(ptr::null_mut()), opaque_vec_type: Type::from_ref(ptr::null_mut()), @@ -840,10 +865,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.local().instances } - pub fn monomorphizing<'a>(&'a self) -> &'a RefCell> { - &self.local().monomorphizing - } - pub fn vtables<'a>(&'a self) -> &'a RefCell, ValueRef>> { &self.local().vtables } @@ -894,10 +915,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.local().llsizingtypes } - pub fn adt_reprs<'a>(&'a self) -> &'a RefCell, Rc>>> { - &self.local().adt_reprs - } - pub fn symbol_hasher<'a>(&'a self) -> &'a RefCell { &self.shared.symbol_hasher } @@ -967,15 +984,20 @@ 
impl<'b, 'tcx> CrateContext<'b, 'tcx> { TypeOfDepthLock(self.local()) } - pub fn check_overflow(&self) -> bool { - self.shared.check_overflow + pub fn layout_of(&self, ty: Ty<'tcx>) -> &'tcx ty::layout::Layout { + self.tcx().infer_ctxt(None, None, traits::Reveal::All).enter(|infcx| { + ty.layout(&infcx).unwrap_or_else(|e| { + match e { + ty::layout::LayoutError::SizeOverflow(_) => + self.sess().fatal(&e.to_string()), + _ => bug!("failed to get layout for `{}`: {}", ty, e) + } + }) + }) } - pub fn check_drop_flag_for_sanity(&self) -> bool { - // This controls whether we emit a conditional llvm.debugtrap - // guarded on whether the dropflag is one of its (two) valid - // values. - self.shared.check_drop_flag_for_sanity + pub fn check_overflow(&self) -> bool { + self.shared.check_overflow } pub fn use_dll_storage_attrs(&self) -> bool { @@ -999,11 +1021,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { pub fn empty_substs_for_def_id(&self, item_def_id: DefId) -> &'tcx Substs<'tcx> { self.shared().empty_substs_for_def_id(item_def_id) } - - pub fn empty_substs_for_scheme(&self, scheme: &ty::TypeScheme<'tcx>) - -> &'tcx Substs<'tcx> { - self.shared().empty_substs_for_scheme(scheme) - } } pub struct TypeOfDepthLock<'a, 'tcx: 'a>(&'a LocalCrateContext<'tcx>); diff --git a/src/librustc_trans/controlflow.rs b/src/librustc_trans/controlflow.rs deleted file mode 100644 index 8845f12421..0000000000 --- a/src/librustc_trans/controlflow.rs +++ /dev/null @@ -1,434 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use llvm::ValueRef; -use rustc::hir::def::Def; -use middle::lang_items::{PanicFnLangItem, PanicBoundsCheckFnLangItem}; -use rustc::ty::subst::Substs; -use base::*; -use basic_block::BasicBlock; -use build::*; -use callee::{Callee, ArgVals}; -use cleanup::CleanupMethods; -use cleanup; -use common::*; -use consts; -use debuginfo; -use debuginfo::{DebugLoc, ToDebugLoc}; -use expr; -use machine; - -use rustc::hir; - -use syntax::ast; -use syntax::parse::token::InternedString; -use syntax::parse::token; - -pub fn trans_stmt<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - s: &hir::Stmt) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_stmt"); - let fcx = cx.fcx; - debug!("trans_stmt({:?})", s); - - if cx.unreachable.get() { - return cx; - } - - if cx.sess().asm_comments() { - add_span_comment(cx, s.span, &format!("{:?}", s)); - } - - let mut bcx = cx; - - let id = s.node.id(); - let cleanup_debug_loc = - debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), id, s.span, false); - fcx.push_ast_cleanup_scope(cleanup_debug_loc); - - match s.node { - hir::StmtExpr(ref e, _) | hir::StmtSemi(ref e, _) => { - bcx = trans_stmt_semi(bcx, &e); - } - hir::StmtDecl(ref d, _) => { - match d.node { - hir::DeclLocal(ref local) => { - bcx = init_local(bcx, &local); - debuginfo::create_local_var_metadata(bcx, &local); - } - // Inner items are visited by `trans_item`/`trans_meth`. 
- hir::DeclItem(_) => {}, - } - } - } - - bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, s.node.id()); - - return bcx; -} - -pub fn trans_stmt_semi<'blk, 'tcx>(cx: Block<'blk, 'tcx>, e: &hir::Expr) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_stmt_semi"); - - if cx.unreachable.get() { - return cx; - } - - let ty = expr_ty(cx, e); - if cx.fcx.type_needs_drop(ty) { - expr::trans_to_lvalue(cx, e, "stmt").bcx - } else { - expr::trans_into(cx, e, expr::Ignore) - } -} - -pub fn trans_block<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - b: &hir::Block, - mut dest: expr::Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_block"); - - if bcx.unreachable.get() { - return bcx; - } - - let fcx = bcx.fcx; - let mut bcx = bcx; - - let cleanup_debug_loc = - debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), b.id, b.span, true); - fcx.push_ast_cleanup_scope(cleanup_debug_loc); - - for s in &b.stmts { - bcx = trans_stmt(bcx, s); - } - - if dest != expr::Ignore { - let block_ty = node_id_type(bcx, b.id); - - if b.expr.is_none() || type_is_zero_size(bcx.ccx(), block_ty) { - dest = expr::Ignore; - } else if b.expr.is_some() { - // If the block has an expression, but that expression isn't reachable, - // don't save into the destination given, ignore it. - if let Some(ref cfg) = bcx.fcx.cfg { - if !cfg.node_is_reachable(b.expr.as_ref().unwrap().id) { - dest = expr::Ignore; - } - } - } - } - - match b.expr { - Some(ref e) => { - if !bcx.unreachable.get() { - bcx = expr::trans_into(bcx, &e, dest); - } - } - None => { - assert!(dest == expr::Ignore || bcx.unreachable.get()); - } - } - - bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, b.id); - - return bcx; -} - -pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - if_id: ast::NodeId, - cond: &hir::Expr, - thn: &hir::Block, - els: Option<&hir::Expr>, - dest: expr::Dest) - -> Block<'blk, 'tcx> { - debug!("trans_if(bcx={}, if_id={}, cond={:?}, thn={}, dest={:?})", - bcx.to_str(), if_id, cond, thn.id, dest); - let _icx = push_ctxt("trans_if"); - - if bcx.unreachable.get() { - return bcx; - } - - let mut bcx = bcx; - - let cond_val = unpack_result!(bcx, expr::trans(bcx, cond).to_llbool()); - - // Drop branches that are known to be impossible - if let Some(cv) = const_to_opt_uint(cond_val) { - if cv == 1 { - // if true { .. } [else { .. }] - bcx = trans_block(bcx, &thn, dest); - DebugLoc::None.apply(bcx.fcx); - } else { - if let Some(elexpr) = els { - bcx = expr::trans_into(bcx, &elexpr, dest); - DebugLoc::None.apply(bcx.fcx); - } - } - - return bcx; - } - - let name = format!("then-block-{}-", thn.id); - let then_bcx_in = bcx.fcx.new_id_block(&name[..], thn.id); - let then_bcx_out = trans_block(then_bcx_in, &thn, dest); - DebugLoc::None.apply(bcx.fcx); - - let cond_source_loc = cond.debug_loc(); - - let next_bcx; - match els { - Some(elexpr) => { - let else_bcx_in = bcx.fcx.new_id_block("else-block", elexpr.id); - let else_bcx_out = expr::trans_into(else_bcx_in, &elexpr, dest); - next_bcx = bcx.fcx.join_blocks(if_id, - &[then_bcx_out, else_bcx_out]); - CondBr(bcx, cond_val, then_bcx_in.llbb, else_bcx_in.llbb, cond_source_loc); - } - - None => { - next_bcx = bcx.fcx.new_id_block("next-block", if_id); - Br(then_bcx_out, next_bcx.llbb, DebugLoc::None); - CondBr(bcx, cond_val, then_bcx_in.llbb, next_bcx.llbb, cond_source_loc); - } - } - - // Clear the source location because it is still set to whatever has been translated - // right before. 
- DebugLoc::None.apply(next_bcx.fcx); - - next_bcx -} - -pub fn trans_while<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - loop_expr: &hir::Expr, - cond: &hir::Expr, - body: &hir::Block) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_while"); - - if bcx.unreachable.get() { - return bcx; - } - - let fcx = bcx.fcx; - - // bcx - // | - // cond_bcx_in <--------+ - // | | - // cond_bcx_out | - // | | | - // | body_bcx_in | - // cleanup_blk | | - // | body_bcx_out --+ - // next_bcx_in - - let next_bcx_in = fcx.new_id_block("while_exit", loop_expr.id); - let cond_bcx_in = fcx.new_id_block("while_cond", cond.id); - let body_bcx_in = fcx.new_id_block("while_body", body.id); - - fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, cond_bcx_in]); - - Br(bcx, cond_bcx_in.llbb, loop_expr.debug_loc()); - - // compile the block where we will handle loop cleanups - let cleanup_llbb = fcx.normal_exit_block(loop_expr.id, cleanup::EXIT_BREAK); - - // compile the condition - let Result {bcx: cond_bcx_out, val: cond_val} = - expr::trans(cond_bcx_in, cond).to_llbool(); - - CondBr(cond_bcx_out, cond_val, body_bcx_in.llbb, cleanup_llbb, cond.debug_loc()); - - // loop body: - let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore); - Br(body_bcx_out, cond_bcx_in.llbb, DebugLoc::None); - - fcx.pop_loop_cleanup_scope(loop_expr.id); - return next_bcx_in; -} - -pub fn trans_loop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - loop_expr: &hir::Expr, - body: &hir::Block) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_loop"); - - if bcx.unreachable.get() { - return bcx; - } - - let fcx = bcx.fcx; - - // bcx - // | - // body_bcx_in - // | - // body_bcx_out - // - // next_bcx - // - // Links between body_bcx_in and next_bcx are created by - // break statements. - - let next_bcx_in = bcx.fcx.new_id_block("loop_exit", loop_expr.id); - let body_bcx_in = bcx.fcx.new_id_block("loop_body", body.id); - - fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, body_bcx_in]); - - Br(bcx, body_bcx_in.llbb, loop_expr.debug_loc()); - let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore); - Br(body_bcx_out, body_bcx_in.llbb, DebugLoc::None); - - fcx.pop_loop_cleanup_scope(loop_expr.id); - - // If there are no predecessors for the next block, we just translated an endless loop and the - // next block is unreachable - if BasicBlock(next_bcx_in.llbb).pred_iter().next().is_none() { - Unreachable(next_bcx_in); - } - - return next_bcx_in; -} - -pub fn trans_break_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - opt_label: Option, - exit: usize) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_break_cont"); - - if bcx.unreachable.get() { - return bcx; - } - - let fcx = bcx.fcx; - - // Locate loop that we will break to - let loop_id = match opt_label { - None => fcx.top_loop_scope(), - Some(_) => { - match bcx.tcx().expect_def(expr.id) { - Def::Label(loop_id) => loop_id, - r => { - bug!("{:?} in def-map for label", r) - } - } - } - }; - - // Generate appropriate cleanup code and branch - let cleanup_llbb = fcx.normal_exit_block(loop_id, exit); - Br(bcx, cleanup_llbb, expr.debug_loc()); - Unreachable(bcx); // anything afterwards should be ignored - return bcx; -} - -pub fn trans_break<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - label_opt: Option) - -> Block<'blk, 'tcx> { - return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_BREAK); -} - -pub fn trans_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - label_opt: Option) - -> Block<'blk, 'tcx> { - return trans_break_cont(bcx, 
expr, label_opt, cleanup::EXIT_LOOP); -} - -pub fn trans_ret<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - return_expr: &hir::Expr, - retval_expr: Option<&hir::Expr>) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_ret"); - - if bcx.unreachable.get() { - return bcx; - } - - let fcx = bcx.fcx; - let mut bcx = bcx; - if let Some(x) = retval_expr { - let dest = if fcx.llretslotptr.get().is_some() { - expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")) - } else { - expr::Ignore - }; - bcx = expr::trans_into(bcx, &x, dest); - match dest { - expr::SaveIn(slot) if fcx.needs_ret_allocas => { - Store(bcx, slot, fcx.llretslotptr.get().unwrap()); - } - _ => {} - } - } - let cleanup_llbb = fcx.return_exit_block(); - Br(bcx, cleanup_llbb, return_expr.debug_loc()); - Unreachable(bcx); - return bcx; -} - -pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - call_info: NodeIdAndSpan, - fail_str: InternedString) - -> Block<'blk, 'tcx> { - let ccx = bcx.ccx(); - let _icx = push_ctxt("trans_fail_value"); - - if bcx.unreachable.get() { - return bcx; - } - - let v_str = C_str_slice(ccx, fail_str); - let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo); - let filename = token::intern_and_get_ident(&loc.file.name); - let filename = C_str_slice(ccx, filename); - let line = C_u32(ccx, loc.line as u32); - let expr_file_line_const = C_struct(ccx, &[v_str, filename, line], false); - let align = machine::llalign_of_min(ccx, val_ty(expr_file_line_const)); - let expr_file_line = consts::addr_of(ccx, expr_file_line_const, align, "panic_loc"); - let args = vec!(expr_file_line); - let did = langcall(bcx.tcx(), Some(call_info.span), "", PanicFnLangItem); - Callee::def(ccx, did, ccx.tcx().mk_substs(Substs::empty())) - .call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx -} - -pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - call_info: NodeIdAndSpan, - index: ValueRef, - len: ValueRef) - -> Block<'blk, 'tcx> { - let ccx = bcx.ccx(); - let _icx = push_ctxt("trans_fail_bounds_check"); - - if bcx.unreachable.get() { - return bcx; - } - - // Extract the file/line from the span - let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo); - let filename = token::intern_and_get_ident(&loc.file.name); - - // Invoke the lang item - let filename = C_str_slice(ccx, filename); - let line = C_u32(ccx, loc.line as u32); - let file_line_const = C_struct(ccx, &[filename, line], false); - let align = machine::llalign_of_min(ccx, val_ty(file_line_const)); - let file_line = consts::addr_of(ccx, file_line_const, align, "panic_bounds_check_loc"); - let args = vec!(file_line, index, len); - let did = langcall(bcx.tcx(), Some(call_info.span), "", PanicBoundsCheckFnLangItem); - Callee::def(ccx, did, ccx.tcx().mk_substs(Substs::empty())) - .call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx -} diff --git a/src/librustc_trans/datum.rs b/src/librustc_trans/datum.rs deleted file mode 100644 index 875f88e37c..0000000000 --- a/src/librustc_trans/datum.rs +++ /dev/null @@ -1,828 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! ## The Datum module -//! -//! A `Datum` encapsulates the result of evaluating a Rust expression. It -//! 
contains a `ValueRef` indicating the result, a `Ty` describing -//! the Rust type, but also a *kind*. The kind indicates whether the datum -//! has cleanup scheduled (lvalue) or not (rvalue) and -- in the case of -//! rvalues -- whether or not the value is "by ref" or "by value". -//! -//! The datum API is designed to try and help you avoid memory errors like -//! forgetting to arrange cleanup or duplicating a value. The type of the -//! datum incorporates the kind, and thus reflects whether it has cleanup -//! scheduled: -//! -//! - `Datum` -- by ref, cleanup scheduled -//! - `Datum` -- by value or by ref, no cleanup scheduled -//! - `Datum` -- either `Datum` or `Datum` -//! -//! Rvalue and expr datums are noncopyable, and most of the methods on -//! datums consume the datum itself (with some notable exceptions). This -//! reflects the fact that datums may represent affine values which ought -//! to be consumed exactly once, and if you were to try to (for example) -//! store an affine value multiple times, you would be duplicating it, -//! which would certainly be a bug. -//! -//! Some of the datum methods, however, are designed to work only on -//! copyable values such as ints or pointers. Those methods may borrow the -//! datum (`&self`) rather than consume it, but they always include -//! assertions on the type of the value represented to check that this -//! makes sense. An example is `shallow_copy()`, which duplicates -//! a datum value. -//! -//! Translating an expression always yields a `Datum` result, but -//! the methods `to_[lr]value_datum()` can be used to coerce a -//! `Datum` into a `Datum` or `Datum` as -//! needed. Coercing to an lvalue is fairly common, and generally occurs -//! whenever it is necessary to inspect a value and pull out its -//! subcomponents (for example, a match, or indexing expression). Coercing -//! to an rvalue is more unusual; it occurs when moving values from place -//! to place, such as in an assignment expression or parameter passing. -//! -//! ### Lvalues in detail -//! -//! An lvalue datum is one for which cleanup has been scheduled. Lvalue -//! datums are always located in memory, and thus the `ValueRef` for an -//! LLVM value is always a pointer to the actual Rust value. This means -//! that if the Datum has a Rust type of `int`, then the LLVM type of the -//! `ValueRef` will be `int*` (pointer to int). -//! -//! Because lvalues already have cleanups scheduled, the memory must be -//! zeroed to prevent the cleanup from taking place (presuming that the -//! Rust type needs drop in the first place, otherwise it doesn't -//! matter). The Datum code automatically performs this zeroing when the -//! value is stored to a new location, for example. -//! -//! Lvalues usually result from evaluating lvalue expressions. For -//! example, evaluating a local variable `x` yields an lvalue, as does a -//! reference to a field like `x.f` or an index `x[i]`. -//! -//! Lvalue datums can also arise by *converting* an rvalue into an lvalue. -//! This is done with the `to_lvalue_datum` method defined on -//! `Datum`. Basically this method just schedules cleanup if the -//! datum is an rvalue, possibly storing the value into a stack slot first -//! if needed. Converting rvalues into lvalues occurs in constructs like -//! `&foo()` or `match foo() { ref x => ... }`, where the user is -//! implicitly requesting a temporary. -//! -//! ### Rvalues in detail -//! -//! Rvalues datums are values with no cleanup scheduled. One must be -//! 
careful with rvalue datums to ensure that cleanup is properly -//! arranged, usually by converting to an lvalue datum or by invoking the -//! `add_clean` method. -//! -//! ### Scratch datums -//! -//! Sometimes you need some temporary scratch space. The functions -//! `[lr]value_scratch_datum()` can be used to get temporary stack -//! space. As their name suggests, they yield lvalues and rvalues -//! respectively. That is, the slot from `lvalue_scratch_datum` will have -//! cleanup arranged, and the slot from `rvalue_scratch_datum` does not. - -pub use self::Expr::*; -pub use self::RvalueMode::*; - -use llvm::ValueRef; -use adt; -use base::*; -use build::{Load, Store}; -use common::*; -use cleanup; -use cleanup::{CleanupMethods, DropHintDatum, DropHintMethods}; -use expr; -use tvec; -use value::Value; -use rustc::ty::Ty; - -use std::fmt; -use syntax::ast; -use syntax_pos::DUMMY_SP; - -/// A `Datum` encapsulates the result of evaluating an expression. It -/// describes where the value is stored, what Rust type the value has, -/// whether it is addressed by reference, and so forth. Please refer -/// the section on datums in `README.md` for more details. -#[derive(Clone, Copy)] -pub struct Datum<'tcx, K> { - /// The llvm value. This is either a pointer to the Rust value or - /// the value itself, depending on `kind` below. - pub val: ValueRef, - - /// The rust type of the value. - pub ty: Ty<'tcx>, - - /// Indicates whether this is by-ref or by-value. - pub kind: K, -} - -impl<'tcx, K: fmt::Debug> fmt::Debug for Datum<'tcx, K> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Datum({:?}, {:?}, {:?})", - Value(self.val), self.ty, self.kind) - } -} - -pub struct DatumBlock<'blk, 'tcx: 'blk, K> { - pub bcx: Block<'blk, 'tcx>, - pub datum: Datum<'tcx, K>, -} - -#[derive(Debug)] -pub enum Expr { - /// a fresh value that was produced and which has no cleanup yet - /// because it has not yet "landed" into its permanent home - RvalueExpr(Rvalue), - - /// `val` is a pointer into memory for which a cleanup is scheduled - /// (and thus has type *T). If you move out of an Lvalue, you must - /// zero out the memory (FIXME #5016). - LvalueExpr(Lvalue), -} - -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum DropFlagInfo { - DontZeroJustUse(ast::NodeId), - ZeroAndMaintain(ast::NodeId), - None, -} - -impl DropFlagInfo { - pub fn must_zero(&self) -> bool { - match *self { - DropFlagInfo::DontZeroJustUse(..) => false, - DropFlagInfo::ZeroAndMaintain(..) => true, - DropFlagInfo::None => true, - } - } - - pub fn hint_datum<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) - -> Option> { - let id = match *self { - DropFlagInfo::None => return None, - DropFlagInfo::DontZeroJustUse(id) | - DropFlagInfo::ZeroAndMaintain(id) => id, - }; - - let hints = bcx.fcx.lldropflag_hints.borrow(); - let retval = hints.hint_datum(id); - assert!(retval.is_some(), "An id (={}) means must have a hint", id); - retval - } -} - -// FIXME: having Lvalue be `Copy` is a bit of a footgun, since clients -// may not realize that subparts of an Lvalue can have a subset of -// drop-flags associated with them, while this as written will just -// memcpy the drop_flag_info. But, it is an easier way to get `_match` -// off the ground to just let this be `Copy` for now. 
-#[derive(Copy, Clone, Debug)] -pub struct Lvalue { - pub source: &'static str, - pub drop_flag_info: DropFlagInfo -} - -#[derive(Debug)] -pub struct Rvalue { - pub mode: RvalueMode -} - -/// Classifies what action we should take when a value is moved away -/// with respect to its drop-flag. -/// -/// Long term there will be no need for this classification: all flags -/// (which will be stored on the stack frame) will have the same -/// interpretation and maintenance code associated with them. -#[derive(Copy, Clone, Debug)] -pub enum HintKind { - /// When the value is moved, set the drop-flag to "dropped" - /// (i.e. "zero the flag", even when the specific representation - /// is not literally 0) and when it is reinitialized, set the - /// drop-flag back to "initialized". - ZeroAndMaintain, - - /// When the value is moved, do not set the drop-flag to "dropped" - /// However, continue to read the drop-flag in deciding whether to - /// drop. (In essence, the path/fragment in question will never - /// need to be dropped at the points where it is moved away by - /// this code, but we are defending against the scenario where - /// some *other* code could move away (or drop) the value and thus - /// zero-the-flag, which is why we will still read from it. - DontZeroJustUse, -} - -impl Lvalue { // Constructors for various Lvalues. - pub fn new<'blk, 'tcx>(source: &'static str) -> Lvalue { - debug!("Lvalue at {} no drop flag info", source); - Lvalue { source: source, drop_flag_info: DropFlagInfo::None } - } - - pub fn new_dropflag_hint(source: &'static str) -> Lvalue { - debug!("Lvalue at {} is drop flag hint", source); - Lvalue { source: source, drop_flag_info: DropFlagInfo::None } - } - - pub fn new_with_hint<'blk, 'tcx>(source: &'static str, - bcx: Block<'blk, 'tcx>, - id: ast::NodeId, - k: HintKind) -> Lvalue { - let (opt_id, info) = { - let hint_available = Lvalue::has_dropflag_hint(bcx, id) && - bcx.tcx().sess.nonzeroing_move_hints(); - let info = match k { - HintKind::ZeroAndMaintain if hint_available => - DropFlagInfo::ZeroAndMaintain(id), - HintKind::DontZeroJustUse if hint_available => - DropFlagInfo::DontZeroJustUse(id), - _ => - DropFlagInfo::None, - }; - (Some(id), info) - }; - debug!("Lvalue at {}, id: {:?} info: {:?}", source, opt_id, info); - Lvalue { source: source, drop_flag_info: info } - } -} // end Lvalue constructor methods. - -impl Lvalue { - fn has_dropflag_hint<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - id: ast::NodeId) -> bool { - let hints = bcx.fcx.lldropflag_hints.borrow(); - hints.has_hint(id) - } - pub fn dropflag_hint<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) - -> Option> { - self.drop_flag_info.hint_datum(bcx) - } -} - -impl Rvalue { - pub fn new(m: RvalueMode) -> Rvalue { - Rvalue { mode: m } - } -} - -// Make Datum linear for more type safety. 
-impl Drop for Rvalue { - fn drop(&mut self) { } -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub enum RvalueMode { - /// `val` is a pointer to the actual value (and thus has type *T) - ByRef, - - /// `val` is the actual value (*only used for immediates* like ints, ptrs) - ByValue, -} - -pub fn immediate_rvalue<'tcx>(val: ValueRef, ty: Ty<'tcx>) -> Datum<'tcx, Rvalue> { - return Datum::new(val, ty, Rvalue::new(ByValue)); -} - -pub fn immediate_rvalue_bcx<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> DatumBlock<'blk, 'tcx, Rvalue> { - return DatumBlock::new(bcx, immediate_rvalue(val, ty)) -} - -/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to -/// it. The memory will be dropped upon exit from `scope`. The callback `populate` should -/// initialize the memory. -/// -/// The flag `zero` indicates how the temporary space itself should be -/// initialized at the outset of the function; the only time that -/// `InitAlloca::Uninit` is a valid value for `zero` is when the -/// caller can prove that either (1.) the code injected by `populate` -/// onto `bcx` always dominates the end of `scope`, or (2.) the data -/// being allocated has no associated destructor. -pub fn lvalue_scratch_datum<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - ty: Ty<'tcx>, - name: &str, - zero: InitAlloca, - scope: cleanup::ScopeId, - populate: F) - -> DatumBlock<'blk, 'tcx, Lvalue> where - F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>, -{ - // Very subtle: potentially initialize the scratch memory at point where it is alloca'ed. - // (See discussion at Issue 30530.) - let scratch = alloc_ty_init(bcx, ty, zero, name); - debug!("lvalue_scratch_datum scope={:?} scratch={:?} ty={:?}", - scope, Value(scratch), ty); - - // Subtle. Populate the scratch memory *before* scheduling cleanup. - let bcx = populate(bcx, scratch); - bcx.fcx.schedule_drop_mem(scope, scratch, ty, None); - - DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue::new("datum::lvalue_scratch_datum"))) -} - -/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to -/// it. If `zero` is true, the space will be zeroed when it is allocated; this is normally not -/// necessary, but in the case of automatic rooting in match statements it is possible to have -/// temporaries that may not get initialized if a certain arm is not taken, so we must zero them. -/// You must arrange any cleanups etc yourself! -pub fn rvalue_scratch_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ty: Ty<'tcx>, - name: &str) - -> Datum<'tcx, Rvalue> { - let scratch = alloc_ty(bcx, ty, name); - call_lifetime_start(bcx, scratch); - Datum::new(scratch, ty, Rvalue::new(ByRef)) -} - -/// Indicates the "appropriate" mode for this value, which is either by ref or by value, depending -/// on whether type is immediate or not. 
-pub fn appropriate_rvalue_mode<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>) -> RvalueMode { - if type_is_immediate(ccx, ty) { - ByValue - } else { - ByRef - } -} - -fn add_rvalue_clean<'a, 'tcx>(mode: RvalueMode, - fcx: &FunctionContext<'a, 'tcx>, - scope: cleanup::ScopeId, - val: ValueRef, - ty: Ty<'tcx>) { - debug!("add_rvalue_clean scope={:?} val={:?} ty={:?}", - scope, Value(val), ty); - match mode { - ByValue => { fcx.schedule_drop_immediate(scope, val, ty); } - ByRef => { - fcx.schedule_lifetime_end(scope, val); - fcx.schedule_drop_mem(scope, val, ty, None); - } - } -} - -pub trait KindOps { - - /// Take appropriate action after the value in `datum` has been - /// stored to a new location. - fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> Block<'blk, 'tcx>; - - /// True if this mode is a reference mode, meaning that the datum's - /// val field is a pointer to the actual value - fn is_by_ref(&self) -> bool; - - /// Converts to an Expr kind - fn to_expr_kind(self) -> Expr; - -} - -impl KindOps for Rvalue { - fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - _val: ValueRef, - _ty: Ty<'tcx>) - -> Block<'blk, 'tcx> { - // No cleanup is scheduled for an rvalue, so we don't have - // to do anything after a move to cancel or duplicate it. - if self.is_by_ref() { - call_lifetime_end(bcx, _val); - } - bcx - } - - fn is_by_ref(&self) -> bool { - self.mode == ByRef - } - - fn to_expr_kind(self) -> Expr { - RvalueExpr(self) - } -} - -impl KindOps for Lvalue { - /// If an lvalue is moved, we must zero out the memory in which it resides so as to cancel - /// cleanup. If an @T lvalue is copied, we must increment the reference count. - fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("::post_store"); - if bcx.fcx.type_needs_drop(ty) { - // cancel cleanup of affine values: - // 1. if it has drop-hint, mark as moved; then code - // aware of drop-hint won't bother calling the - // drop-glue itself. - if let Some(hint_datum) = self.drop_flag_info.hint_datum(bcx) { - let moved_hint_byte = adt::DTOR_MOVED_HINT; - let hint_llval = hint_datum.to_value().value(); - Store(bcx, C_u8(bcx.fcx.ccx, moved_hint_byte), hint_llval); - } - // 2. if the drop info says its necessary, drop-fill the memory. - if self.drop_flag_info.must_zero() { - let () = drop_done_fill_mem(bcx, val, ty); - } - bcx - } else { - // FIXME (#5016) would be nice to assert this, but we have - // to allow for e.g. DontZeroJustUse flags, for now. - // - // (The dropflag hint construction should be taking - // !type_needs_drop into account; earlier analysis phases - // may not have all the info they need to include such - // information properly, I think; in particular the - // fragments analysis works on a non-monomorphized view of - // the code.) 
- // - // assert_eq!(self.drop_flag_info, DropFlagInfo::None); - bcx - } - } - - fn is_by_ref(&self) -> bool { - true - } - - fn to_expr_kind(self) -> Expr { - LvalueExpr(self) - } -} - -impl KindOps for Expr { - fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> Block<'blk, 'tcx> { - match *self { - LvalueExpr(ref l) => l.post_store(bcx, val, ty), - RvalueExpr(ref r) => r.post_store(bcx, val, ty), - } - } - - fn is_by_ref(&self) -> bool { - match *self { - LvalueExpr(ref l) => l.is_by_ref(), - RvalueExpr(ref r) => r.is_by_ref() - } - } - - fn to_expr_kind(self) -> Expr { - self - } -} - -impl<'tcx> Datum<'tcx, Rvalue> { - /// Schedules a cleanup for this datum in the given scope. That means that this datum is no - /// longer an rvalue datum; hence, this function consumes the datum and returns the contained - /// ValueRef. - pub fn add_clean<'a>(self, - fcx: &FunctionContext<'a, 'tcx>, - scope: cleanup::ScopeId) - -> ValueRef { - add_rvalue_clean(self.kind.mode, fcx, scope, self.val, self.ty); - self.val - } - - /// Returns an lvalue datum (that is, a by ref datum with cleanup scheduled). If `self` is not - /// already an lvalue, cleanup will be scheduled in the temporary scope for `expr_id`. - pub fn to_lvalue_datum_in_scope<'blk>(self, - bcx: Block<'blk, 'tcx>, - name: &str, - scope: cleanup::ScopeId) - -> DatumBlock<'blk, 'tcx, Lvalue> { - let fcx = bcx.fcx; - - match self.kind.mode { - ByRef => { - add_rvalue_clean(ByRef, fcx, scope, self.val, self.ty); - DatumBlock::new(bcx, Datum::new( - self.val, - self.ty, - Lvalue::new("datum::to_lvalue_datum_in_scope"))) - } - - ByValue => { - lvalue_scratch_datum( - bcx, self.ty, name, InitAlloca::Dropped, scope, - |bcx, llval| { - debug!("populate call for Datum::to_lvalue_datum_in_scope \ - self.ty={:?}", self.ty); - // do not call_lifetime_start here; the - // `InitAlloc::Dropped` will start scratch - // value's lifetime at open of function body. - let bcx = self.store_to(bcx, llval); - bcx.fcx.schedule_lifetime_end(scope, llval); - bcx - }) - } - } - } - - pub fn to_ref_datum<'blk>(self, bcx: Block<'blk, 'tcx>) - -> DatumBlock<'blk, 'tcx, Rvalue> { - let mut bcx = bcx; - match self.kind.mode { - ByRef => DatumBlock::new(bcx, self), - ByValue => { - let scratch = rvalue_scratch_datum(bcx, self.ty, "to_ref"); - bcx = self.store_to(bcx, scratch.val); - DatumBlock::new(bcx, scratch) - } - } - } - - pub fn to_appropriate_datum<'blk>(self, bcx: Block<'blk, 'tcx>) - -> DatumBlock<'blk, 'tcx, Rvalue> { - match self.appropriate_rvalue_mode(bcx.ccx()) { - ByRef => { - self.to_ref_datum(bcx) - } - ByValue => { - match self.kind.mode { - ByValue => DatumBlock::new(bcx, self), - ByRef => { - let llval = load_ty(bcx, self.val, self.ty); - call_lifetime_end(bcx, self.val); - DatumBlock::new(bcx, Datum::new(llval, self.ty, Rvalue::new(ByValue))) - } - } - } - } - } -} - -/// Methods suitable for "expr" datums that could be either lvalues or -/// rvalues. These include coercions into lvalues/rvalues but also a number -/// of more general operations. (Some of those operations could be moved to -/// the more general `impl Datum`, but it's convenient to have them -/// here since we can `match self.kind` rather than having to implement -/// generic methods in `KindOps`.) 
-impl<'tcx> Datum<'tcx, Expr> { - fn match_kind(self, if_lvalue: F, if_rvalue: G) -> R where - F: FnOnce(Datum<'tcx, Lvalue>) -> R, - G: FnOnce(Datum<'tcx, Rvalue>) -> R, - { - let Datum { val, ty, kind } = self; - match kind { - LvalueExpr(l) => if_lvalue(Datum::new(val, ty, l)), - RvalueExpr(r) => if_rvalue(Datum::new(val, ty, r)), - } - } - - /// Asserts that this datum *is* an lvalue and returns it. - #[allow(dead_code)] // potentially useful - pub fn assert_lvalue(self) -> Datum<'tcx, Lvalue> { - self.match_kind( - |d| d, - |_| bug!("assert_lvalue given rvalue")) - } - - pub fn store_to_dest<'blk>(self, - bcx: Block<'blk, 'tcx>, - dest: expr::Dest, - expr_id: ast::NodeId) - -> Block<'blk, 'tcx> { - match dest { - expr::Ignore => { - self.add_clean_if_rvalue(bcx, expr_id); - bcx - } - expr::SaveIn(addr) => { - self.store_to(bcx, addr) - } - } - } - - /// Arranges cleanup for `self` if it is an rvalue. Use when you are done working with a value - /// that may need drop. - pub fn add_clean_if_rvalue<'blk>(self, - bcx: Block<'blk, 'tcx>, - expr_id: ast::NodeId) { - self.match_kind( - |_| { /* Nothing to do, cleanup already arranged */ }, - |r| { - let scope = cleanup::temporary_scope(bcx.tcx(), expr_id); - r.add_clean(bcx.fcx, scope); - }) - } - - pub fn to_lvalue_datum<'blk>(self, - bcx: Block<'blk, 'tcx>, - name: &str, - expr_id: ast::NodeId) - -> DatumBlock<'blk, 'tcx, Lvalue> { - debug!("to_lvalue_datum self: {:?}", self); - - self.match_kind( - |l| DatumBlock::new(bcx, l), - |r| { - let scope = cleanup::temporary_scope(bcx.tcx(), expr_id); - r.to_lvalue_datum_in_scope(bcx, name, scope) - }) - } - - /// Ensures that we have an rvalue datum (that is, a datum with no cleanup scheduled). - pub fn to_rvalue_datum<'blk>(self, - bcx: Block<'blk, 'tcx>, - name: &'static str) - -> DatumBlock<'blk, 'tcx, Rvalue> { - self.match_kind( - |l| { - let mut bcx = bcx; - match l.appropriate_rvalue_mode(bcx.ccx()) { - ByRef => { - let scratch = rvalue_scratch_datum(bcx, l.ty, name); - bcx = l.store_to(bcx, scratch.val); - DatumBlock::new(bcx, scratch) - } - ByValue => { - let v = load_ty(bcx, l.val, l.ty); - bcx = l.kind.post_store(bcx, l.val, l.ty); - DatumBlock::new(bcx, Datum::new(v, l.ty, Rvalue::new(ByValue))) - } - } - }, - |r| DatumBlock::new(bcx, r)) - } - -} - -/// Methods suitable only for lvalues. These include the various -/// operations to extract components out of compound data structures, -/// such as extracting the field from a struct or a particular element -/// from an array. -impl<'tcx> Datum<'tcx, Lvalue> { - /// Converts a datum into a by-ref value. The datum type must be one which is always passed by - /// reference. - pub fn to_llref(self) -> ValueRef { - self.val - } - - // Extracts a component of a compound data structure (e.g., a field from a - // struct). Note that if self is an opened, unsized type then the returned - // datum may also be unsized _without the size information_. It is the - // callers responsibility to package the result in some way to make a valid - // datum in that case (e.g., by making a fat pointer or opened pair). 
- pub fn get_element<'blk, F>(&self, bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>, - gep: F) - -> Datum<'tcx, Lvalue> where - F: FnOnce(adt::MaybeSizedValue) -> ValueRef, - { - let val = if type_is_sized(bcx.tcx(), self.ty) { - let val = adt::MaybeSizedValue::sized(self.val); - gep(val) - } else { - let val = adt::MaybeSizedValue::unsized_( - Load(bcx, expr::get_dataptr(bcx, self.val)), - Load(bcx, expr::get_meta(bcx, self.val))); - gep(val) - }; - Datum { - val: val, - kind: Lvalue::new("Datum::get_element"), - ty: ty, - } - } - - pub fn get_vec_base_and_len<'blk>(&self, bcx: Block<'blk, 'tcx>) - -> (ValueRef, ValueRef) { - //! Converts a vector into the slice pair. - - tvec::get_base_and_len(bcx, self.val, self.ty) - } -} - -/// Generic methods applicable to any sort of datum. -impl<'tcx, K: KindOps + fmt::Debug> Datum<'tcx, K> { - pub fn new(val: ValueRef, ty: Ty<'tcx>, kind: K) -> Datum<'tcx, K> { - Datum { val: val, ty: ty, kind: kind } - } - - pub fn to_expr_datum(self) -> Datum<'tcx, Expr> { - let Datum { val, ty, kind } = self; - Datum { val: val, ty: ty, kind: kind.to_expr_kind() } - } - - /// Moves or copies this value into a new home, as appropriate depending on the type of the - /// datum. This method consumes the datum, since it would be incorrect to go on using the datum - /// if the value represented is affine (and hence the value is moved). - pub fn store_to<'blk>(self, - bcx: Block<'blk, 'tcx>, - dst: ValueRef) - -> Block<'blk, 'tcx> { - self.shallow_copy_raw(bcx, dst); - - self.kind.post_store(bcx, self.val, self.ty) - } - - /// Helper function that performs a shallow copy of this value into `dst`, which should be a - /// pointer to a memory location suitable for `self.ty`. `dst` should contain uninitialized - /// memory (either newly allocated, zeroed, or dropped). - /// - /// This function is private to datums because it leaves memory in an unstable state, where the - /// source value has been copied but not zeroed. Public methods are `store_to` (if you no - /// longer need the source value) or `shallow_copy` (if you wish the source value to remain - /// valid). - fn shallow_copy_raw<'blk>(&self, - bcx: Block<'blk, 'tcx>, - dst: ValueRef) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("copy_to_no_check"); - - if type_is_zero_size(bcx.ccx(), self.ty) { - return bcx; - } - - if self.kind.is_by_ref() { - memcpy_ty(bcx, dst, self.val, self.ty); - } else { - store_ty(bcx, self.val, dst, self.ty); - } - - return bcx; - } - - /// Copies the value into a new location. This function always preserves the existing datum as - /// a valid value. Therefore, it does not consume `self` and, also, cannot be applied to affine - /// values (since they must never be duplicated). - pub fn shallow_copy<'blk>(&self, - bcx: Block<'blk, 'tcx>, - dst: ValueRef) - -> Block<'blk, 'tcx> { - /*! - * Copies the value into a new location. This function always - * preserves the existing datum as a valid value. Therefore, - * it does not consume `self` and, also, cannot be applied to - * affine values (since they must never be duplicated). - */ - - assert!(!self.ty.moves_by_default(bcx.tcx(), - &bcx.tcx().empty_parameter_environment(), DUMMY_SP)); - self.shallow_copy_raw(bcx, dst) - } - - /// See the `appropriate_rvalue_mode()` function - pub fn appropriate_rvalue_mode<'a>(&self, ccx: &CrateContext<'a, 'tcx>) - -> RvalueMode { - appropriate_rvalue_mode(ccx, self.ty) - } - - /// Converts `self` into a by-value `ValueRef`. Consumes this datum (i.e., absolves you of - /// responsibility to cleanup the value). 
For this to work, the value must be something - /// scalar-ish (like an int or a pointer) which (1) does not require drop glue and (2) is - /// naturally passed around by value, and not by reference. - pub fn to_llscalarish<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef { - assert!(!bcx.fcx.type_needs_drop(self.ty)); - assert!(self.appropriate_rvalue_mode(bcx.ccx()) == ByValue); - if self.kind.is_by_ref() { - load_ty(bcx, self.val, self.ty) - } else { - self.val - } - } - - pub fn to_llbool<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef { - assert!(self.ty.is_bool()); - self.to_llscalarish(bcx) - } -} - -impl<'blk, 'tcx, K> DatumBlock<'blk, 'tcx, K> { - pub fn new(bcx: Block<'blk, 'tcx>, datum: Datum<'tcx, K>) - -> DatumBlock<'blk, 'tcx, K> { - DatumBlock { bcx: bcx, datum: datum } - } -} - -impl<'blk, 'tcx, K: KindOps + fmt::Debug> DatumBlock<'blk, 'tcx, K> { - pub fn to_expr_datumblock(self) -> DatumBlock<'blk, 'tcx, Expr> { - DatumBlock::new(self.bcx, self.datum.to_expr_datum()) - } -} - -impl<'blk, 'tcx> DatumBlock<'blk, 'tcx, Expr> { - pub fn store_to_dest(self, - dest: expr::Dest, - expr_id: ast::NodeId) -> Block<'blk, 'tcx> { - let DatumBlock { bcx, datum } = self; - datum.store_to_dest(bcx, dest, expr_id) - } - - pub fn to_llbool(self) -> Result<'blk, 'tcx> { - let DatumBlock { datum, bcx } = self; - Result::new(bcx, datum.to_llbool(bcx)) - } -} diff --git a/src/librustc_trans/debuginfo/create_scope_map.rs b/src/librustc_trans/debuginfo/create_scope_map.rs index fe6a48d4c5..21716d55ac 100644 --- a/src/librustc_trans/debuginfo/create_scope_map.rs +++ b/src/librustc_trans/debuginfo/create_scope_map.rs @@ -15,64 +15,43 @@ use super::utils::{DIB, span_start}; use llvm; use llvm::debuginfo::{DIScope, DISubprogram}; use common::{CrateContext, FunctionContext}; -use rustc::hir::pat_util; use rustc::mir::repr::{Mir, VisibilityScope}; -use rustc::util::nodemap::NodeMap; use libc::c_uint; use std::ptr; -use syntax_pos::{Span, Pos}; -use syntax::{ast, codemap}; +use syntax_pos::Pos; use rustc_data_structures::bitvec::BitVector; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; -use rustc::hir::{self, PatKind}; -// This procedure builds the *scope map* for a given function, which maps any -// given ast::NodeId in the function's AST to the correct DIScope metadata instance. -// -// This builder procedure walks the AST in execution order and keeps track of -// what belongs to which scope, creating DIScope DIEs along the way, and -// introducing *artificial* lexical scope descriptors where necessary. These -// artificial scopes allow GDB to correctly handle name shadowing. -pub fn create_scope_map(cx: &CrateContext, - args: &[hir::Arg], - fn_entry_block: &hir::Block, - fn_metadata: DISubprogram, - fn_ast_id: ast::NodeId) - -> NodeMap { - let mut scope_map = NodeMap(); - let mut scope_stack = vec!(ScopeStackEntry { scope_metadata: fn_metadata, name: None }); - scope_map.insert(fn_ast_id, fn_metadata); - - // Push argument identifiers onto the stack so arguments integrate nicely - // with variable shadowing. - for arg in args { - pat_util::pat_bindings(&arg.pat, |_, node_id, _, path1| { - scope_stack.push(ScopeStackEntry { scope_metadata: fn_metadata, - name: Some(path1.node) }); - scope_map.insert(node_id, fn_metadata); - }) - } +use syntax_pos::BytePos; - // Clang creates a separate scope for function bodies, so let's do this too. 
- with_new_scope(cx, - fn_entry_block.span, - &mut scope_stack, - &mut scope_map, - |cx, scope_stack, scope_map| { - walk_block(cx, fn_entry_block, scope_stack, scope_map); - }); +#[derive(Clone, Copy, Debug)] +pub struct MirDebugScope { + pub scope_metadata: DIScope, + // Start and end offsets of the file to which this DIScope belongs. + // These are used to quickly determine whether some span refers to the same file. + pub file_start_pos: BytePos, + pub file_end_pos: BytePos, +} - return scope_map; +impl MirDebugScope { + pub fn is_valid(&self) -> bool { + !self.scope_metadata.is_null() + } } /// Produce DIScope DIEs for each MIR Scope which has variables defined in it. /// If debuginfo is disabled, the returned vector is empty. -pub fn create_mir_scopes(fcx: &FunctionContext) -> IndexVec { +pub fn create_mir_scopes(fcx: &FunctionContext) -> IndexVec { let mir = fcx.mir.clone().expect("create_mir_scopes: missing MIR for fn"); - let mut scopes = IndexVec::from_elem(ptr::null_mut(), &mir.visibility_scopes); + let null_scope = MirDebugScope { + scope_metadata: ptr::null_mut(), + file_start_pos: BytePos(0), + file_end_pos: BytePos(0) + }; + let mut scopes = IndexVec::from_elem(null_scope, &mir.visibility_scopes); let fn_metadata = match fcx.debug_context { FunctionDebugContext::RegularContext(box ref data) => data.fn_metadata, @@ -102,8 +81,8 @@ fn make_mir_scope(ccx: &CrateContext, has_variables: &BitVector, fn_metadata: DISubprogram, scope: VisibilityScope, - scopes: &mut IndexVec) { - if !scopes[scope].is_null() { + scopes: &mut IndexVec) { + if scopes[scope].is_valid() { return; } @@ -113,7 +92,12 @@ fn make_mir_scope(ccx: &CrateContext, scopes[parent] } else { // The root is the function itself. - scopes[scope] = fn_metadata; + let loc = span_start(ccx, mir.span); + scopes[scope] = MirDebugScope { + scope_metadata: fn_metadata, + file_start_pos: loc.file.start_pos, + file_end_pos: loc.file.end_pos, + }; return; }; @@ -124,422 +108,25 @@ fn make_mir_scope(ccx: &CrateContext, // However, we don't skip creating a nested scope if // our parent is the root, because we might want to // put arguments in the root and not have shadowing. - if parent_scope != fn_metadata { + if parent_scope.scope_metadata != fn_metadata { scopes[scope] = parent_scope; return; } } let loc = span_start(ccx, scope_data.span); - scopes[scope] = unsafe { let file_metadata = file_metadata(ccx, &loc.file.name, &loc.file.abs_path); + let scope_metadata = unsafe { llvm::LLVMRustDIBuilderCreateLexicalBlock( DIB(ccx), - parent_scope, + parent_scope.scope_metadata, file_metadata, loc.line as c_uint, loc.col.to_usize() as c_uint) }; -} - -// local helper functions for walking the AST. 
-fn with_new_scope(cx: &CrateContext, - scope_span: Span, - scope_stack: &mut Vec , - scope_map: &mut NodeMap, - inner_walk: F) where - F: FnOnce(&CrateContext, &mut Vec, &mut NodeMap), -{ - // Create a new lexical scope and push it onto the stack - let loc = span_start(cx, scope_span); - let file_metadata = file_metadata(cx, &loc.file.name, &loc.file.abs_path); - let parent_scope = scope_stack.last().unwrap().scope_metadata; - - let scope_metadata = unsafe { - llvm::LLVMRustDIBuilderCreateLexicalBlock( - DIB(cx), - parent_scope, - file_metadata, - loc.line as c_uint, - loc.col.to_usize() as c_uint) + scopes[scope] = MirDebugScope { + scope_metadata: scope_metadata, + file_start_pos: loc.file.start_pos, + file_end_pos: loc.file.end_pos, }; - - scope_stack.push(ScopeStackEntry { scope_metadata: scope_metadata, name: None }); - - inner_walk(cx, scope_stack, scope_map); - - // pop artificial scopes - while scope_stack.last().unwrap().name.is_some() { - scope_stack.pop(); - } - - if scope_stack.last().unwrap().scope_metadata != scope_metadata { - span_bug!(scope_span, "debuginfo: Inconsistency in scope management."); - } - - scope_stack.pop(); -} - -struct ScopeStackEntry { - scope_metadata: DIScope, - name: Option -} - -fn walk_block(cx: &CrateContext, - block: &hir::Block, - scope_stack: &mut Vec , - scope_map: &mut NodeMap) { - scope_map.insert(block.id, scope_stack.last().unwrap().scope_metadata); - - // The interesting things here are statements and the concluding expression. - for statement in &block.stmts { - scope_map.insert(statement.node.id(), - scope_stack.last().unwrap().scope_metadata); - - match statement.node { - hir::StmtDecl(ref decl, _) => - walk_decl(cx, &decl, scope_stack, scope_map), - hir::StmtExpr(ref exp, _) | - hir::StmtSemi(ref exp, _) => - walk_expr(cx, &exp, scope_stack, scope_map), - } - } - - if let Some(ref exp) = block.expr { - walk_expr(cx, &exp, scope_stack, scope_map); - } -} - -fn walk_decl(cx: &CrateContext, - decl: &hir::Decl, - scope_stack: &mut Vec , - scope_map: &mut NodeMap) { - match *decl { - codemap::Spanned { node: hir::DeclLocal(ref local), .. } => { - scope_map.insert(local.id, scope_stack.last().unwrap().scope_metadata); - - walk_pattern(cx, &local.pat, scope_stack, scope_map); - - if let Some(ref exp) = local.init { - walk_expr(cx, &exp, scope_stack, scope_map); - } - } - _ => () - } -} - -fn walk_pattern(cx: &CrateContext, - pat: &hir::Pat, - scope_stack: &mut Vec , - scope_map: &mut NodeMap) { - // Unfortunately, we cannot just use pat_util::pat_bindings() or - // ast_util::walk_pat() here because we have to visit *all* nodes in - // order to put them into the scope map. The above functions don't do that. - match pat.node { - PatKind::Binding(_, ref path1, ref sub_pat_opt) => { - // LLVM does not properly generate 'DW_AT_start_scope' fields - // for variable DIEs. For this reason we have to introduce - // an artificial scope at bindings whenever a variable with - // the same name is declared in *any* parent scope. - // - // Otherwise the following error occurs: - // - // let x = 10; - // - // do_something(); // 'gdb print x' correctly prints 10 - // - // { - // do_something(); // 'gdb print x' prints 0, because it - // // already reads the uninitialized 'x' - // // from the next line... - // let x = 100; - // do_something(); // 'gdb print x' correctly prints 100 - // } - - // Is there already a binding with that name? - // N.B.: this comparison must be UNhygienic... 
because - // gdb knows nothing about the context, so any two - // variables with the same name will cause the problem. - let name = path1.node; - let need_new_scope = scope_stack - .iter() - .any(|entry| entry.name == Some(name)); - - if need_new_scope { - // Create a new lexical scope and push it onto the stack - let loc = span_start(cx, pat.span); - let file_metadata = file_metadata(cx, &loc.file.name, &loc.file.abs_path); - let parent_scope = scope_stack.last().unwrap().scope_metadata; - - let scope_metadata = unsafe { - llvm::LLVMRustDIBuilderCreateLexicalBlock( - DIB(cx), - parent_scope, - file_metadata, - loc.line as c_uint, - loc.col.to_usize() as c_uint) - }; - - scope_stack.push(ScopeStackEntry { - scope_metadata: scope_metadata, - name: Some(name) - }); - - } else { - // Push a new entry anyway so the name can be found - let prev_metadata = scope_stack.last().unwrap().scope_metadata; - scope_stack.push(ScopeStackEntry { - scope_metadata: prev_metadata, - name: Some(name) - }); - } - - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - - if let Some(ref sub_pat) = *sub_pat_opt { - walk_pattern(cx, &sub_pat, scope_stack, scope_map); - } - } - - PatKind::Wild => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - } - - PatKind::TupleStruct(_, ref sub_pats, _) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - - for p in sub_pats { - walk_pattern(cx, &p, scope_stack, scope_map); - } - } - - PatKind::Path(..) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - } - - PatKind::Struct(_, ref field_pats, _) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - - for &codemap::Spanned { - node: hir::FieldPat { pat: ref sub_pat, .. }, - .. - } in field_pats { - walk_pattern(cx, &sub_pat, scope_stack, scope_map); - } - } - - PatKind::Tuple(ref sub_pats, _) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - - for sub_pat in sub_pats { - walk_pattern(cx, &sub_pat, scope_stack, scope_map); - } - } - - PatKind::Box(ref sub_pat) | PatKind::Ref(ref sub_pat, _) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - walk_pattern(cx, &sub_pat, scope_stack, scope_map); - } - - PatKind::Lit(ref exp) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - walk_expr(cx, &exp, scope_stack, scope_map); - } - - PatKind::Range(ref exp1, ref exp2) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - walk_expr(cx, &exp1, scope_stack, scope_map); - walk_expr(cx, &exp2, scope_stack, scope_map); - } - - PatKind::Vec(ref front_sub_pats, ref middle_sub_pats, ref back_sub_pats) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - - for sub_pat in front_sub_pats { - walk_pattern(cx, &sub_pat, scope_stack, scope_map); - } - - if let Some(ref sub_pat) = *middle_sub_pats { - walk_pattern(cx, &sub_pat, scope_stack, scope_map); - } - - for sub_pat in back_sub_pats { - walk_pattern(cx, &sub_pat, scope_stack, scope_map); - } - } - } -} - -fn walk_expr(cx: &CrateContext, - exp: &hir::Expr, - scope_stack: &mut Vec , - scope_map: &mut NodeMap) { - - scope_map.insert(exp.id, scope_stack.last().unwrap().scope_metadata); - - match exp.node { - hir::ExprLit(_) | - hir::ExprBreak(_) | - hir::ExprAgain(_) | - hir::ExprPath(..) 
=> {} - - hir::ExprCast(ref sub_exp, _) | - hir::ExprType(ref sub_exp, _) | - hir::ExprAddrOf(_, ref sub_exp) | - hir::ExprField(ref sub_exp, _) | - hir::ExprTupField(ref sub_exp, _) => - walk_expr(cx, &sub_exp, scope_stack, scope_map), - - hir::ExprBox(ref sub_expr) => { - walk_expr(cx, &sub_expr, scope_stack, scope_map); - } - - hir::ExprRet(ref exp_opt) => match *exp_opt { - Some(ref sub_exp) => walk_expr(cx, &sub_exp, scope_stack, scope_map), - None => () - }, - - hir::ExprUnary(_, ref sub_exp) => { - walk_expr(cx, &sub_exp, scope_stack, scope_map); - } - - hir::ExprAssignOp(_, ref lhs, ref rhs) | - hir::ExprIndex(ref lhs, ref rhs) | - hir::ExprBinary(_, ref lhs, ref rhs) => { - walk_expr(cx, &lhs, scope_stack, scope_map); - walk_expr(cx, &rhs, scope_stack, scope_map); - } - - hir::ExprVec(ref init_expressions) | - hir::ExprTup(ref init_expressions) => { - for ie in init_expressions { - walk_expr(cx, &ie, scope_stack, scope_map); - } - } - - hir::ExprAssign(ref sub_exp1, ref sub_exp2) | - hir::ExprRepeat(ref sub_exp1, ref sub_exp2) => { - walk_expr(cx, &sub_exp1, scope_stack, scope_map); - walk_expr(cx, &sub_exp2, scope_stack, scope_map); - } - - hir::ExprIf(ref cond_exp, ref then_block, ref opt_else_exp) => { - walk_expr(cx, &cond_exp, scope_stack, scope_map); - - with_new_scope(cx, - then_block.span, - scope_stack, - scope_map, - |cx, scope_stack, scope_map| { - walk_block(cx, &then_block, scope_stack, scope_map); - }); - - match *opt_else_exp { - Some(ref else_exp) => - walk_expr(cx, &else_exp, scope_stack, scope_map), - _ => () - } - } - - hir::ExprWhile(ref cond_exp, ref loop_body, _) => { - walk_expr(cx, &cond_exp, scope_stack, scope_map); - - with_new_scope(cx, - loop_body.span, - scope_stack, - scope_map, - |cx, scope_stack, scope_map| { - walk_block(cx, &loop_body, scope_stack, scope_map); - }) - } - - hir::ExprLoop(ref block, _) | - hir::ExprBlock(ref block) => { - with_new_scope(cx, - block.span, - scope_stack, - scope_map, - |cx, scope_stack, scope_map| { - walk_block(cx, &block, scope_stack, scope_map); - }) - } - - hir::ExprClosure(_, ref decl, ref block, _) => { - with_new_scope(cx, - block.span, - scope_stack, - scope_map, - |cx, scope_stack, scope_map| { - for &hir::Arg { pat: ref pattern, .. } in &decl.inputs { - walk_pattern(cx, &pattern, scope_stack, scope_map); - } - - walk_block(cx, &block, scope_stack, scope_map); - }) - } - - hir::ExprCall(ref fn_exp, ref args) => { - walk_expr(cx, &fn_exp, scope_stack, scope_map); - - for arg_exp in args { - walk_expr(cx, &arg_exp, scope_stack, scope_map); - } - } - - hir::ExprMethodCall(_, _, ref args) => { - for arg_exp in args { - walk_expr(cx, &arg_exp, scope_stack, scope_map); - } - } - - hir::ExprMatch(ref discriminant_exp, ref arms, _) => { - walk_expr(cx, &discriminant_exp, scope_stack, scope_map); - - // For each arm we have to first walk the pattern as these might - // introduce new artificial scopes. It should be sufficient to - // walk only one pattern per arm, as they all must contain the - // same binding names. - - for arm_ref in arms { - let arm_span = arm_ref.pats[0].span; - - with_new_scope(cx, - arm_span, - scope_stack, - scope_map, - |cx, scope_stack, scope_map| { - for pat in &arm_ref.pats { - walk_pattern(cx, &pat, scope_stack, scope_map); - } - - if let Some(ref guard_exp) = arm_ref.guard { - walk_expr(cx, &guard_exp, scope_stack, scope_map) - } - - walk_expr(cx, &arm_ref.body, scope_stack, scope_map); - }) - } - } - - hir::ExprStruct(_, ref fields, ref base_exp) => { - for &hir::Field { expr: ref exp, .. 
} in fields { - walk_expr(cx, &exp, scope_stack, scope_map); - } - - match *base_exp { - Some(ref exp) => walk_expr(cx, &exp, scope_stack, scope_map), - None => () - } - } - - hir::ExprInlineAsm(_, ref outputs, ref inputs) => { - for output in outputs { - walk_expr(cx, output, scope_stack, scope_map); - } - - for input in inputs { - walk_expr(cx, input, scope_stack, scope_map); - } - } - } } diff --git a/src/librustc_trans/debuginfo/gdb.rs b/src/librustc_trans/debuginfo/gdb.rs index 0a8d490dcd..8f937d3fe2 100644 --- a/src/librustc_trans/debuginfo/gdb.rs +++ b/src/librustc_trans/debuginfo/gdb.rs @@ -77,7 +77,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(ccx: &CrateContext) llvm::LLVMSetInitializer(section_var, C_bytes(ccx, section_contents)); llvm::LLVMSetGlobalConstant(section_var, llvm::True); llvm::LLVMSetUnnamedAddr(section_var, llvm::True); - llvm::LLVMSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage); + llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage); // This should make sure that the whole section is not larger than // the string it contains. Otherwise we get a warning from GDB. llvm::LLVMSetAlignment(section_var, 1); diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 3fe8b2b667..ffca5d3244 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -14,28 +14,24 @@ use self::MemberDescriptionFactory::*; use self::EnumDiscriminantInfo::*; use super::utils::{debug_context, DIB, span_start, bytes_to_bits, size_and_align_of, - get_namespace_and_span_for_item, create_DIArray, - fn_should_be_ignored, is_node_local_to_unit}; + get_namespace_and_span_for_item, create_DIArray, is_node_local_to_unit}; use super::namespace::mangled_name_of_item; use super::type_names::{compute_debuginfo_type_name, push_debuginfo_type_name}; -use super::{declare_local, VariableKind, VariableAccess, CrateDebugContext}; +use super::{CrateDebugContext}; use context::SharedCrateContext; use session::Session; use llvm::{self, ValueRef}; -use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType}; +use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType, DILexicalBlock}; use rustc::hir::def_id::DefId; -use rustc::hir::pat_util; -use rustc::ty::subst; -use rustc::hir::map as hir_map; -use rustc::hir::{self, PatKind}; -use {type_of, adt, machine, monomorphize}; -use common::{self, CrateContext, FunctionContext, Block}; -use _match::{BindingInfo, TransBindingMode}; +use rustc::ty::subst::Substs; +use rustc::hir; +use {type_of, machine, monomorphize}; +use common::CrateContext; use type_::Type; -use rustc::ty::{self, Ty}; -use session::config::{self, FullDebugInfo}; +use rustc::ty::{self, AdtKind, Ty, layout}; +use session::config; use util::nodemap::FnvHashMap; use util::common::path2cstr; @@ -44,7 +40,6 @@ use std::ffi::CString; use std::path::Path; use std::ptr; use std::rc::Rc; -use syntax; use syntax::util::interner::Interner; use syntax::ast; use syntax::parse::token; @@ -180,14 +175,10 @@ impl<'tcx> TypeMap<'tcx> { ty::TyFloat(_) => { push_debuginfo_type_name(cx, type_, false, &mut unique_type_id); }, - ty::TyEnum(def, substs) => { - unique_type_id.push_str("enum "); + ty::TyAdt(def, substs) => { + unique_type_id.push_str(&(String::from(def.descr()) + " ")); from_def_id_and_substs(self, cx, def.did, substs, &mut unique_type_id); - }, - ty::TyStruct(def, substs) => { - unique_type_id.push_str("struct "); - from_def_id_and_substs(self, 
cx, def.did, substs, &mut unique_type_id); - }, + } ty::TyTuple(component_types) if component_types.is_empty() => { push_debuginfo_type_name(cx, type_, false, &mut unique_type_id); }, @@ -244,7 +235,8 @@ impl<'tcx> TypeMap<'tcx> { ty::TyTrait(ref trait_data) => { unique_type_id.push_str("trait "); - let principal = cx.tcx().erase_late_bound_regions(&trait_data.principal); + let principal = cx.tcx().erase_late_bound_regions_and_normalize( + &trait_data.principal); from_def_id_and_substs(self, cx, @@ -252,7 +244,7 @@ impl<'tcx> TypeMap<'tcx> { principal.substs, &mut unique_type_id); }, - ty::TyFnDef(_, _, &ty::BareFnTy{ unsafety, abi, ref sig } ) | + ty::TyFnDef(.., &ty::BareFnTy{ unsafety, abi, ref sig } ) | ty::TyFnPtr(&ty::BareFnTy{ unsafety, abi, ref sig } ) => { if unsafety == hir::Unsafety::Unsafe { unique_type_id.push_str("unsafe "); @@ -262,8 +254,7 @@ impl<'tcx> TypeMap<'tcx> { unique_type_id.push_str(" fn("); - let sig = cx.tcx().erase_late_bound_regions(sig); - let sig = cx.tcx().normalize_associated_type(&sig); + let sig = cx.tcx().erase_late_bound_regions_and_normalize(sig); for ¶meter_type in &sig.inputs { let parameter_type_id = @@ -315,7 +306,7 @@ impl<'tcx> TypeMap<'tcx> { fn from_def_id_and_substs<'a, 'tcx>(type_map: &mut TypeMap<'tcx>, cx: &CrateContext<'a, 'tcx>, def_id: DefId, - substs: &subst::Substs<'tcx>, + substs: &Substs<'tcx>, output: &mut String) { // First, find out the 'real' def_id of the type. Items inlined from // other crates have to be mapped back to their source. @@ -346,11 +337,10 @@ impl<'tcx> TypeMap<'tcx> { // Add the def-index as the second part output.push_str(&format!("{:x}", def_id.index.as_usize())); - let tps = substs.types.get_slice(subst::TypeSpace); - if !tps.is_empty() { + if substs.types().next().is_some() { output.push('<'); - for &type_parameter in tps { + for type_parameter in substs.types() { let param_type_id = type_map.get_unique_type_id_of_type(cx, type_parameter); let param_type_id = @@ -627,7 +617,7 @@ fn trait_pointer_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // But it does not describe the trait's methods. let def_id = match trait_type.sty { - ty::TyTrait(ref data) => data.principal_def_id(), + ty::TyTrait(ref data) => data.principal.def_id(), _ => { bug!("debuginfo: Unexpected trait-object type in \ trait_pointer_metadata(): {:?}", @@ -706,13 +696,6 @@ pub fn type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty::TyTuple(ref elements) if elements.is_empty() => { MetadataCreationResult::new(basic_type_metadata(cx, t), false) } - ty::TyEnum(def, _) => { - prepare_enum_metadata(cx, - t, - def.did, - unique_type_id, - usage_site_span).finalize(cx) - } ty::TyArray(typ, len) => { fixed_vec_metadata(cx, unique_type_id, typ, Some(len as u64), usage_site_span) } @@ -757,7 +740,7 @@ pub fn type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } } - ty::TyFnDef(_, _, ref barefnty) | ty::TyFnPtr(ref barefnty) => { + ty::TyFnDef(.., ref barefnty) | ty::TyFnPtr(ref barefnty) => { let fn_metadata = subroutine_type_metadata(cx, unique_type_id, &barefnty.sig, @@ -780,12 +763,27 @@ pub fn type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, unique_type_id, usage_site_span).finalize(cx) } - ty::TyStruct(..) => { - prepare_struct_metadata(cx, + ty::TyAdt(def, ..) 
=> match def.adt_kind() { + AdtKind::Struct => { + prepare_struct_metadata(cx, + t, + unique_type_id, + usage_site_span).finalize(cx) + } + AdtKind::Union => { + prepare_union_metadata(cx, t, unique_type_id, usage_site_span).finalize(cx) - } + } + AdtKind::Enum => { + prepare_enum_metadata(cx, + t, + def.did, + unique_type_id, + usage_site_span).finalize(cx) + } + }, ty::TyTuple(ref elements) => { prepare_tuple_metadata(cx, t, @@ -886,26 +884,6 @@ fn file_metadata_(cx: &CrateContext, key: &str, file_name: &str, work_dir: &str) file_metadata } -/// Finds the scope metadata node for the given AST node. -pub fn scope_metadata(fcx: &FunctionContext, - node_id: ast::NodeId, - error_reporting_span: Span) - -> DIScope { - let scope_map = &fcx.debug_context - .get_ref(error_reporting_span) - .scope_map; - match scope_map.borrow().get(&node_id).cloned() { - Some(scope_metadata) => scope_metadata, - None => { - let node = fcx.ccx.tcx().map.get(node_id); - - span_bug!(error_reporting_span, - "debuginfo: Could not find scope info for node {:?}", - node); - } - } -} - fn basic_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> DIType { @@ -1056,6 +1034,7 @@ enum MemberDescriptionFactory<'tcx> { StructMDF(StructMemberDescriptionFactory<'tcx>), TupleMDF(TupleMemberDescriptionFactory<'tcx>), EnumMDF(EnumMemberDescriptionFactory<'tcx>), + UnionMDF(UnionMemberDescriptionFactory<'tcx>), VariantMDF(VariantMemberDescriptionFactory<'tcx>) } @@ -1072,6 +1051,9 @@ impl<'tcx> MemberDescriptionFactory<'tcx> { EnumMDF(ref this) => { this.create_member_descriptions(cx) } + UnionMDF(ref this) => { + this.create_member_descriptions(cx) + } VariantMDF(ref this) => { this.create_member_descriptions(cx) } @@ -1086,7 +1068,7 @@ impl<'tcx> MemberDescriptionFactory<'tcx> { // Creates MemberDescriptions for the fields of a struct struct StructMemberDescriptionFactory<'tcx> { variant: ty::VariantDef<'tcx>, - substs: &'tcx subst::Substs<'tcx>, + substs: &'tcx Substs<'tcx>, is_simd: bool, span: Span, } @@ -1145,8 +1127,8 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let struct_llvm_type = type_of::in_memory_type_of(cx, struct_type); let (struct_def_id, variant, substs) = match struct_type.sty { - ty::TyStruct(def, substs) => (def.did, def.struct_variant(), substs), - _ => bug!("prepare_struct_metadata on a non-struct") + ty::TyAdt(def, substs) => (def.did, def.struct_variant(), substs), + _ => bug!("prepare_struct_metadata on a non-ADT") }; let (containing_scope, _) = get_namespace_and_span_for_item(cx, struct_def_id); @@ -1172,7 +1154,6 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ) } - //=----------------------------------------------------------------------------- // Tuples //=----------------------------------------------------------------------------- @@ -1227,6 +1208,66 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ) } +//=----------------------------------------------------------------------------- +// Unions +//=----------------------------------------------------------------------------- + +struct UnionMemberDescriptionFactory<'tcx> { + variant: ty::VariantDef<'tcx>, + substs: &'tcx Substs<'tcx>, + span: Span, +} + +impl<'tcx> UnionMemberDescriptionFactory<'tcx> { + fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) + -> Vec { + self.variant.fields.iter().map(|field| { + let fty = monomorphize::field_ty(cx.tcx(), self.substs, field); + MemberDescription { + name: field.name.to_string(), + llvm_type: 
type_of::type_of(cx, fty), + type_metadata: type_metadata(cx, fty, self.span), + offset: FixedMemberOffset { bytes: 0 }, + flags: FLAGS_NONE, + } + }).collect() + } +} + +fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + union_type: Ty<'tcx>, + unique_type_id: UniqueTypeId, + span: Span) + -> RecursiveTypeDescription<'tcx> { + let union_name = compute_debuginfo_type_name(cx, union_type, false); + let union_llvm_type = type_of::in_memory_type_of(cx, union_type); + + let (union_def_id, variant, substs) = match union_type.sty { + ty::TyAdt(def, substs) => (def.did, def.struct_variant(), substs), + _ => bug!("prepare_union_metadata on a non-ADT") + }; + + let (containing_scope, _) = get_namespace_and_span_for_item(cx, union_def_id); + + let union_metadata_stub = create_union_stub(cx, + union_llvm_type, + &union_name, + unique_type_id, + containing_scope); + + create_and_register_recursive_type_forward_declaration( + cx, + union_type, + unique_type_id, + union_metadata_stub, + union_llvm_type, + UnionMDF(UnionMemberDescriptionFactory { + variant: variant, + substs: substs, + span: span, + }) + ) +} //=----------------------------------------------------------------------------- // Enums @@ -1239,7 +1280,7 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // offset of zero bytes). struct EnumMemberDescriptionFactory<'tcx> { enum_type: Ty<'tcx>, - type_rep: Rc>, + type_rep: &'tcx layout::Layout, discriminant_type_metadata: Option, containing_scope: DIScope, file_metadata: DIFile, @@ -1250,11 +1291,15 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { let adt = &self.enum_type.ty_adt_def().unwrap(); + let substs = match self.enum_type.sty { + ty::TyAdt(def, ref s) if def.adt_kind() == AdtKind::Enum => s, + _ => bug!("{} is not an enum", self.enum_type) + }; match *self.type_rep { - adt::General(_, ref struct_defs, _) => { + layout::General { ref variants, .. } => { let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata .expect("")); - struct_defs + variants .iter() .enumerate() .map(|(i, struct_def)| { @@ -1285,7 +1330,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } }).collect() }, - adt::Univariant(ref struct_def, _) => { + layout::Univariant{ ref variant, .. } => { assert!(adt.variants.len() <= 1); if adt.variants.is_empty() { @@ -1296,7 +1341,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { member_description_factory) = describe_enum_variant(cx, self.enum_type, - struct_def, + variant, &adt.variants[0], NoDiscriminant, self.containing_scope, @@ -1320,16 +1365,17 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { ] } } - adt::RawNullablePointer { nndiscr: non_null_variant_index, nnty, .. } => { + layout::RawNullablePointer { nndiscr: non_null_variant_index, .. } => { // As far as debuginfo is concerned, the pointer this enum // represents is still wrapped in a struct. This is to make the // DWARF representation of enums uniform. 
// First create a description of the artificial wrapper struct: - let non_null_variant = &adt.variants[non_null_variant_index.0 as usize]; + let non_null_variant = &adt.variants[non_null_variant_index as usize]; let non_null_variant_name = non_null_variant.name.as_str(); // The llvm type and metadata of the pointer + let nnty = monomorphize::field_ty(cx.tcx(), &substs, &non_null_variant.fields[0] ); let non_null_llvm_type = type_of::type_of(cx, nnty); let non_null_type_metadata = type_metadata(cx, nnty, self.span); @@ -1374,7 +1420,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { // Encode the information about the null variant in the union // member's name. - let null_variant_index = (1 - non_null_variant_index.0) as usize; + let null_variant_index = (1 - non_null_variant_index) as usize; let null_variant_name = adt.variants[null_variant_index].name; let union_member_name = format!("RUST$ENCODED$ENUM${}${}", 0, @@ -1392,7 +1438,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } ] }, - adt::StructWrappedNullablePointer { nonnull: ref struct_def, + layout::StructWrappedNullablePointer { nonnull: ref struct_def, nndiscr, ref discrfield, ..} => { // Create a description of the non-null variant @@ -1400,7 +1446,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { describe_enum_variant(cx, self.enum_type, struct_def, - &adt.variants[nndiscr.0 as usize], + &adt.variants[nndiscr as usize], OptimizedDiscriminant, self.containing_scope, self.span); @@ -1415,7 +1461,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { // Encode the information about the null variant in the union // member's name. - let null_variant_index = (1 - nndiscr.0) as usize; + let null_variant_index = (1 - nndiscr) as usize; let null_variant_name = adt.variants[null_variant_index].name; let discrfield = discrfield.iter() .skip(1) @@ -1436,7 +1482,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } ] }, - adt::CEnum(..) => span_bug!(self.span, "This should be unreachable.") + layout::CEnum { .. } => span_bug!(self.span, "This should be unreachable."), + ref l @ _ => bug!("Not an enum layout: {:#?}", l) } } } @@ -1479,16 +1526,39 @@ enum EnumDiscriminantInfo { // full RecursiveTypeDescription. fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, enum_type: Ty<'tcx>, - struct_def: &adt::Struct<'tcx>, + struct_def: &layout::Struct, variant: ty::VariantDef<'tcx>, discriminant_info: EnumDiscriminantInfo, containing_scope: DIScope, span: Span) -> (DICompositeType, Type, MemberDescriptionFactory<'tcx>) { + let substs = match enum_type.sty { + ty::TyAdt(def, s) if def.adt_kind() == AdtKind::Enum => s, + ref t @ _ => bug!("{:#?} is not an enum", t) + }; + + let maybe_discr_and_signed: Option<(layout::Integer, bool)> = match *cx.layout_of(enum_type) { + layout::CEnum {discr, ..} => Some((discr, true)), + layout::General{discr, ..} => Some((discr, false)), + layout::Univariant { .. } + | layout::RawNullablePointer { .. } + | layout::StructWrappedNullablePointer { .. } => None, + ref l @ _ => bug!("This should be unreachable. 
Type is {:#?} layout is {:#?}", enum_type, l) + }; + + let mut field_tys = variant.fields.iter().map(|f: ty::FieldDef<'tcx>| { + monomorphize::field_ty(cx.tcx(), &substs, f) + }).collect::>(); + + if let Some((discr, signed)) = maybe_discr_and_signed { + field_tys.insert(0, discr.to_ty(&cx.tcx(), signed)); + } + + let variant_llvm_type = - Type::struct_(cx, &struct_def.fields + Type::struct_(cx, &field_tys .iter() - .map(|&t| type_of::type_of(cx, t)) + .map(|t| type_of::type_of(cx, t)) .collect::>() , struct_def.packed); @@ -1534,7 +1604,7 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // Build an array of (field name, field type) pairs to be captured in the factory closure. let args: Vec<(String, Ty)> = arg_names.iter() - .zip(&struct_def.fields) + .zip(field_tys.iter()) .map(|(s, &t)| (s.to_string(), t)) .collect(); @@ -1571,7 +1641,6 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let file_metadata = unknown_file_metadata(cx); let variants = &enum_type.ty_adt_def().unwrap().variants; - let enumerators_metadata: Vec = variants .iter() .map(|v| { @@ -1586,7 +1655,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }) .collect(); - let discriminant_type_metadata = |inttype: syntax::attr::IntType| { + let discriminant_type_metadata = |inttype: layout::Integer, signed: bool| { let disr_type_key = (enum_def_id, inttype); let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types .borrow() @@ -1594,12 +1663,12 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, match cached_discriminant_type_metadata { Some(discriminant_type_metadata) => discriminant_type_metadata, None => { - let discriminant_llvm_type = adt::ll_inttype(cx, inttype); + let discriminant_llvm_type = Type::from_integer(cx, inttype); let (discriminant_size, discriminant_align) = size_and_align_of(cx, discriminant_llvm_type); let discriminant_base_type_metadata = type_metadata(cx, - adt::ty_of_inttype(cx.tcx(), inttype), + inttype.to_ty(&cx.tcx(), signed), syntax_pos::DUMMY_SP); let discriminant_name = get_enum_discriminant_name(cx, enum_def_id); @@ -1626,16 +1695,17 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } }; - let type_rep = adt::represent_type(cx, enum_type); + let type_rep = cx.layout_of(enum_type); let discriminant_type_metadata = match *type_rep { - adt::CEnum(inttype, _, _) => { - return FinalMetadata(discriminant_type_metadata(inttype)) + layout::CEnum { discr, signed, .. } => { + return FinalMetadata(discriminant_type_metadata(discr, signed)) }, - adt::RawNullablePointer { .. } | - adt::StructWrappedNullablePointer { .. } | - adt::Univariant(..) => None, - adt::General(inttype, _, _) => Some(discriminant_type_metadata(inttype)), + layout::RawNullablePointer { .. } | + layout::StructWrappedNullablePointer { .. } | + layout::Univariant { .. } => None, + layout::General { discr, .. 
} => Some(discriminant_type_metadata(discr, false)), + ref l @ _ => bug!("Not an enum layout: {:#?}", l) }; let enum_llvm_type = type_of::type_of(cx, enum_type); @@ -1671,7 +1741,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, enum_llvm_type, EnumMDF(EnumMemberDescriptionFactory { enum_type: enum_type, - type_rep: type_rep.clone(), + type_rep: type_rep, discriminant_type_metadata: discriminant_type_metadata, containing_scope: containing_scope, file_metadata: file_metadata, @@ -1814,6 +1884,42 @@ fn create_struct_stub(cx: &CrateContext, return metadata_stub; } +fn create_union_stub(cx: &CrateContext, + union_llvm_type: Type, + union_type_name: &str, + unique_type_id: UniqueTypeId, + containing_scope: DIScope) + -> DICompositeType { + let (union_size, union_align) = size_and_align_of(cx, union_llvm_type); + + let unique_type_id_str = debug_context(cx).type_map + .borrow() + .get_unique_type_id_as_string(unique_type_id); + let name = CString::new(union_type_name).unwrap(); + let unique_type_id = CString::new(unique_type_id_str.as_bytes()).unwrap(); + let metadata_stub = unsafe { + // LLVMRustDIBuilderCreateUnionType() wants an empty array. A null + // pointer will lead to hard to trace and debug LLVM assertions + // later on in llvm/lib/IR/Value.cpp. + let empty_array = create_DIArray(DIB(cx), &[]); + + llvm::LLVMRustDIBuilderCreateUnionType( + DIB(cx), + containing_scope, + name.as_ptr(), + unknown_file_metadata(cx), + UNKNOWN_LINE_NUMBER, + bytes_to_bits(union_size), + bytes_to_bits(union_align), + 0, // Flags + empty_array, + 0, // RuntimeLang + unique_type_id.as_ptr()) + }; + + return metadata_stub; +} + /// Creates debug information for the given global variable. /// /// Adds the created metadata nodes directly to the crate's IR. @@ -1864,225 +1970,16 @@ pub fn create_global_var_metadata(cx: &CrateContext, } } -/// Creates debug information for the given local variable. -/// -/// This function assumes that there's a datum for each pattern component of the -/// local in `bcx.fcx.lllocals`. -/// Adds the created metadata nodes directly to the crate's IR. -pub fn create_local_var_metadata(bcx: Block, local: &hir::Local) { - if bcx.unreachable.get() || - fn_should_be_ignored(bcx.fcx) || - bcx.sess().opts.debuginfo != FullDebugInfo { - return; - } - - let locals = bcx.fcx.lllocals.borrow(); - pat_util::pat_bindings(&local.pat, |_, node_id, span, var_name| { - let datum = match locals.get(&node_id) { - Some(datum) => datum, - None => { - span_bug!(span, - "no entry in lllocals table for {}", - node_id); - } - }; - - if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() { - span_bug!(span, "debuginfo::create_local_var_metadata() - \ - Referenced variable location is not an alloca!"); - } - - let scope_metadata = scope_metadata(bcx.fcx, node_id, span); - - declare_local(bcx, - var_name.node, - datum.ty, - scope_metadata, - VariableAccess::DirectVariable { alloca: datum.val }, - VariableKind::LocalVariable, - span); - }) -} - -/// Creates debug information for a variable captured in a closure. -/// -/// Adds the created metadata nodes directly to the crate's IR. 
-pub fn create_captured_var_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - node_id: ast::NodeId, - env_pointer: ValueRef, - env_index: usize, - captured_by_ref: bool, - span: Span) { - if bcx.unreachable.get() || - fn_should_be_ignored(bcx.fcx) || - bcx.sess().opts.debuginfo != FullDebugInfo { - return; - } - - let cx = bcx.ccx(); - - let ast_item = cx.tcx().map.find(node_id); - - let variable_name = match ast_item { - None => { - span_bug!(span, "debuginfo::create_captured_var_metadata: node not found"); - } - Some(hir_map::NodeLocal(pat)) => { - match pat.node { - PatKind::Binding(_, ref path1, _) => { - path1.node - } - _ => { - span_bug!(span, - "debuginfo::create_captured_var_metadata() - \ - Captured var-id refers to unexpected \ - hir_map variant: {:?}", - ast_item); - } - } - } - _ => { - span_bug!(span, - "debuginfo::create_captured_var_metadata() - \ - Captured var-id refers to unexpected \ - hir_map variant: {:?}", - ast_item); - } - }; - - let variable_type = common::node_id_type(bcx, node_id); - let scope_metadata = bcx.fcx.debug_context.get_ref(span).fn_metadata; - - // env_pointer is the alloca containing the pointer to the environment, - // so it's type is **EnvironmentType. In order to find out the type of - // the environment we have to "dereference" two times. - let llvm_env_data_type = common::val_ty(env_pointer).element_type() - .element_type(); - let byte_offset_of_var_in_env = machine::llelement_offset(cx, - llvm_env_data_type, - env_index); - - let address_operations = unsafe { - [llvm::LLVMRustDIBuilderCreateOpDeref(), - llvm::LLVMRustDIBuilderCreateOpPlus(), - byte_offset_of_var_in_env as i64, - llvm::LLVMRustDIBuilderCreateOpDeref()] - }; - - let address_op_count = if captured_by_ref { - address_operations.len() - } else { - address_operations.len() - 1 - }; - - let variable_access = VariableAccess::IndirectVariable { - alloca: env_pointer, - address_operations: &address_operations[..address_op_count] - }; - - declare_local(bcx, - variable_name, - variable_type, - scope_metadata, - variable_access, - VariableKind::CapturedVariable, - span); -} - -/// Creates debug information for a local variable introduced in the head of a -/// match-statement arm. -/// -/// Adds the created metadata nodes directly to the crate's IR. -pub fn create_match_binding_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - variable_name: ast::Name, - binding: BindingInfo<'tcx>) { - if bcx.unreachable.get() || - fn_should_be_ignored(bcx.fcx) || - bcx.sess().opts.debuginfo != FullDebugInfo { - return; - } - - let scope_metadata = scope_metadata(bcx.fcx, binding.id, binding.span); - let aops = unsafe { - [llvm::LLVMRustDIBuilderCreateOpDeref()] - }; - // Regardless of the actual type (`T`) we're always passed the stack slot - // (alloca) for the binding. For ByRef bindings that's a `T*` but for ByMove - // bindings we actually have `T**`. So to get the actual variable we need to - // dereference once more. For ByCopy we just use the stack slot we created - // for the binding. 
- let var_access = match binding.trmode { - TransBindingMode::TrByCopy(llbinding) | - TransBindingMode::TrByMoveIntoCopy(llbinding) => VariableAccess::DirectVariable { - alloca: llbinding - }, - TransBindingMode::TrByMoveRef => VariableAccess::IndirectVariable { - alloca: binding.llmatch, - address_operations: &aops - }, - TransBindingMode::TrByRef => VariableAccess::DirectVariable { - alloca: binding.llmatch - } - }; - - declare_local(bcx, - variable_name, - binding.ty, - scope_metadata, - var_access, - VariableKind::LocalVariable, - binding.span); -} - -/// Creates debug information for the given function argument. -/// -/// This function assumes that there's a datum for each pattern component of the -/// argument in `bcx.fcx.lllocals`. -/// Adds the created metadata nodes directly to the crate's IR. -pub fn create_argument_metadata(bcx: Block, arg: &hir::Arg) { - if bcx.unreachable.get() || - fn_should_be_ignored(bcx.fcx) || - bcx.sess().opts.debuginfo != FullDebugInfo { - return; +// Creates an "extension" of an existing DIScope into another file. +pub fn extend_scope_to_file(ccx: &CrateContext, + scope_metadata: DIScope, + file: &syntax_pos::FileMap) + -> DILexicalBlock { + let file_metadata = file_metadata(ccx, &file.name, &file.abs_path); + unsafe { + llvm::LLVMRustDIBuilderCreateLexicalBlockFile( + DIB(ccx), + scope_metadata, + file_metadata) } - - let scope_metadata = bcx - .fcx - .debug_context - .get_ref(arg.pat.span) - .fn_metadata; - let locals = bcx.fcx.lllocals.borrow(); - - pat_util::pat_bindings(&arg.pat, |_, node_id, span, var_name| { - let datum = match locals.get(&node_id) { - Some(v) => v, - None => { - span_bug!(span, "no entry in lllocals table for {}", node_id); - } - }; - - if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() { - span_bug!(span, "debuginfo::create_argument_metadata() - \ - Referenced variable location is not an alloca!"); - } - - let argument_index = { - let counter = &bcx - .fcx - .debug_context - .get_ref(span) - .argument_counter; - let argument_index = counter.get(); - counter.set(argument_index + 1); - argument_index - }; - - declare_local(bcx, - var_name.node, - datum.ty, - scope_metadata, - VariableAccess::DirectVariable { alloca: datum.val }, - VariableKind::ArgumentVariable(argument_index), - span); - }) } diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index d6a4ce3c43..a23fd3ab8b 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -27,15 +27,14 @@ use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilderRef, DISubprogram, DIArr use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; use rustc::ty::subst::Substs; -use rustc::hir; use abi::Abi; -use common::{NodeIdAndSpan, CrateContext, FunctionContext, Block, BlockAndBuilder}; -use inline; +use common::{CrateContext, FunctionContext, Block, BlockAndBuilder}; use monomorphize::{self, Instance}; use rustc::ty::{self, Ty}; +use rustc::mir::repr as mir; use session::config::{self, FullDebugInfo, LimitedDebugInfo, NoDebugInfo}; -use util::nodemap::{DefIdMap, NodeMap, FnvHashMap, FnvHashSet}; +use util::nodemap::{DefIdMap, FnvHashMap, FnvHashSet}; use libc::c_uint; use std::cell::{Cell, RefCell}; @@ -44,7 +43,7 @@ use std::ptr; use syntax_pos::{self, Span, Pos}; use syntax::ast; -use syntax::attr::IntType; +use rustc::ty::layout; pub mod gdb; mod utils; @@ -54,15 +53,10 @@ pub mod metadata; mod create_scope_map; mod source_loc; -pub use self::create_scope_map::create_mir_scopes; +pub use 
self::create_scope_map::{create_mir_scopes, MirDebugScope}; pub use self::source_loc::start_emitting_source_locations; -pub use self::source_loc::get_cleanup_debug_loc_for_ast_node; -pub use self::source_loc::with_source_location_override; -pub use self::metadata::create_match_binding_metadata; -pub use self::metadata::create_argument_metadata; -pub use self::metadata::create_captured_var_metadata; pub use self::metadata::create_global_var_metadata; -pub use self::metadata::create_local_var_metadata; +pub use self::metadata::extend_scope_to_file; #[allow(non_upper_case_globals)] const DW_TAG_auto_variable: c_uint = 0x100; @@ -75,7 +69,7 @@ pub struct CrateDebugContext<'tcx> { builder: DIBuilderRef, current_debug_location: Cell, created_files: RefCell>, - created_enum_disr_types: RefCell>, + created_enum_disr_types: RefCell>, type_map: RefCell>, namespace_map: RefCell>, @@ -140,9 +134,7 @@ impl FunctionDebugContext { } pub struct FunctionDebugContextData { - scope_map: RefCell>, fn_metadata: DISubprogram, - argument_counter: Cell, source_locations_enabled: Cell, source_location_override: Cell, } @@ -229,7 +221,8 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>, sig: &ty::FnSig<'tcx>, abi: Abi, - llfn: ValueRef) -> FunctionDebugContext { + llfn: ValueRef, + mir: &mir::Mir) -> FunctionDebugContext { if cx.sess().opts.debuginfo == NoDebugInfo { return FunctionDebugContext::DebugInfoDisabled; } @@ -238,8 +231,8 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // Do this here already, in case we do an early exit from this function. source_loc::set_debug_location(cx, None, UnknownLocation); - let instance = inline::maybe_inline_instance(cx, instance); - let (containing_scope, span) = get_containing_scope_and_span(cx, instance); + let containing_scope = get_containing_scope(cx, instance); + let span = mir.span; // This can be the case for functions inlined from another crate if span == syntax_pos::DUMMY_SP { @@ -266,7 +259,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // Get_template_parameters() will append a `<...>` clause to the function // name if necessary. 
- let generics = cx.tcx().lookup_item_type(fn_def_id).generics; + let generics = cx.tcx().lookup_generics(fn_def_id); let template_parameters = get_template_parameters(cx, &generics, instance.substs, @@ -305,9 +298,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // Initialize fn debug context (including scope map and namespace map) let fn_debug_context = box FunctionDebugContextData { - scope_map: RefCell::new(NodeMap()), fn_metadata: fn_metadata, - argument_counter: Cell::new(1), source_locations_enabled: Cell::new(false), source_location_override: Cell::new(false), }; @@ -353,38 +344,37 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fn get_template_parameters<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, generics: &ty::Generics<'tcx>, - param_substs: &Substs<'tcx>, + substs: &Substs<'tcx>, file_metadata: DIFile, name_to_append_suffix_to: &mut String) -> DIArray { - let actual_types = param_substs.types.as_slice(); - - if actual_types.is_empty() { + if substs.types().next().is_none() { return create_DIArray(DIB(cx), &[]); } name_to_append_suffix_to.push('<'); - for (i, &actual_type) in actual_types.iter().enumerate() { + for (i, actual_type) in substs.types().enumerate() { + if i != 0 { + name_to_append_suffix_to.push_str(","); + } + let actual_type = cx.tcx().normalize_associated_type(&actual_type); // Add actual type name to <...> clause of function name let actual_type_name = compute_debuginfo_type_name(cx, actual_type, true); name_to_append_suffix_to.push_str(&actual_type_name[..]); - - if i != actual_types.len() - 1 { - name_to_append_suffix_to.push_str(","); - } } name_to_append_suffix_to.push('>'); // Again, only create type information if full debuginfo is enabled let template_params: Vec<_> = if cx.sess().opts.debuginfo == FullDebugInfo { - generics.types.as_slice().iter().enumerate().map(|(i, param)| { - let actual_type = cx.tcx().normalize_associated_type(&actual_types[i]); + let names = get_type_parameter_names(cx, generics); + substs.types().zip(names).map(|(ty, name)| { + let actual_type = cx.tcx().normalize_associated_type(&ty); let actual_type_metadata = type_metadata(cx, actual_type, syntax_pos::DUMMY_SP); - let name = CString::new(param.name.as_str().as_bytes()).unwrap(); + let name = CString::new(name.as_str().as_bytes()).unwrap(); unsafe { llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( DIB(cx), @@ -403,9 +393,19 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, return create_DIArray(DIB(cx), &template_params[..]); } - fn get_containing_scope_and_span<'ccx, 'tcx>(cx: &CrateContext<'ccx, 'tcx>, - instance: Instance<'tcx>) - -> (DIScope, Span) { + fn get_type_parameter_names<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + generics: &ty::Generics<'tcx>) + -> Vec { + let mut names = generics.parent.map_or(vec![], |def_id| { + get_type_parameter_names(cx, cx.tcx().lookup_generics(def_id)) + }); + names.extend(generics.types.iter().map(|param| param.name)); + names + } + + fn get_containing_scope<'ccx, 'tcx>(cx: &CrateContext<'ccx, 'tcx>, + instance: Instance<'tcx>) + -> DIScope { // First, let's see if this is a method within an inherent impl. Because // if yes, we want to make the result subroutine DIE a child of the // subroutine's self-type. 
@@ -414,10 +414,18 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, if cx.tcx().trait_id_of_impl(impl_def_id).is_none() { let impl_self_ty = cx.tcx().lookup_item_type(impl_def_id).ty; let impl_self_ty = cx.tcx().erase_regions(&impl_self_ty); - let impl_self_ty = monomorphize::apply_param_substs(cx.tcx(), + let impl_self_ty = monomorphize::apply_param_substs(cx.shared(), instance.substs, &impl_self_ty); - Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP)) + + // Only "class" methods are generally understood by LLVM, + // so avoid methods on other types (e.g. `<*mut T>::null`). + match impl_self_ty.sty { + ty::TyAdt(..) => { + Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP)) + } + _ => None + } } else { // For trait method impls we still use the "parallel namespace" // strategy @@ -425,41 +433,15 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } }); - let containing_scope = self_type.unwrap_or_else(|| { + self_type.unwrap_or_else(|| { namespace::item_namespace(cx, DefId { krate: instance.def.krate, index: cx.tcx() .def_key(instance.def) .parent - .expect("get_containing_scope_and_span: missing parent?") + .expect("get_containing_scope: missing parent?") }) - }); - - // Try to get some span information, if we have an inlined item. - let definition_span = cx.tcx() - .map - .def_id_span(instance.def, syntax_pos::DUMMY_SP); - - (containing_scope, definition_span) - } -} - -/// Computes the scope map for a function given its declaration and body. -pub fn fill_scope_map_for_function<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, - fn_decl: &hir::FnDecl, - top_level_block: &hir::Block, - fn_ast_id: ast::NodeId) { - match fcx.debug_context { - FunctionDebugContext::RegularContext(box ref data) => { - let scope_map = create_scope_map::create_scope_map(fcx.ccx, - &fn_decl.inputs, - top_level_block, - data.fn_metadata, - fn_ast_id); - *data.scope_map.borrow_mut() = scope_map; - } - FunctionDebugContext::DebugInfoDisabled | - FunctionDebugContext::FunctionWithoutDebugInfo => {} + }) } } @@ -537,7 +519,6 @@ pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum DebugLoc { - At(ast::NodeId, Span), ScopeAt(DIScope, Span), None } @@ -551,28 +532,3 @@ impl DebugLoc { source_loc::set_source_location(bcx.fcx(), Some(bcx), self); } } - -pub trait ToDebugLoc { - fn debug_loc(&self) -> DebugLoc; -} - -impl ToDebugLoc for hir::Expr { - fn debug_loc(&self) -> DebugLoc { - DebugLoc::At(self.id, self.span) - } -} - -impl ToDebugLoc for NodeIdAndSpan { - fn debug_loc(&self) -> DebugLoc { - DebugLoc::At(self.id, self.span) - } -} - -impl ToDebugLoc for Option { - fn debug_loc(&self) -> DebugLoc { - match *self { - Some(NodeIdAndSpan { id, span }) => DebugLoc::At(id, span), - None => DebugLoc::None - } - } -} diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs index d288b9dcef..1aee27c144 100644 --- a/src/librustc_trans/debuginfo/source_loc.rs +++ b/src/librustc_trans/debuginfo/source_loc.rs @@ -11,79 +11,17 @@ use self::InternalDebugLocation::*; use super::utils::{debug_context, span_start}; -use super::metadata::{scope_metadata,UNKNOWN_COLUMN_NUMBER}; +use super::metadata::{UNKNOWN_COLUMN_NUMBER}; use super::{FunctionDebugContext, DebugLoc}; use llvm; use llvm::debuginfo::DIScope; use builder::Builder; -use common::{NodeIdAndSpan, CrateContext, FunctionContext}; +use common::{CrateContext, FunctionContext}; use libc::c_uint; use std::ptr; 
-use syntax_pos::{self, Span, Pos}; -use syntax::ast; - -pub fn get_cleanup_debug_loc_for_ast_node<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - node_id: ast::NodeId, - node_span: Span, - is_block: bool) - -> NodeIdAndSpan { - // A debug location needs two things: - // (1) A span (of which only the beginning will actually be used) - // (2) An AST node-id which will be used to look up the lexical scope - // for the location in the functions scope-map - // - // This function will calculate the debug location for compiler-generated - // cleanup calls that are executed when control-flow leaves the - // scope identified by `node_id`. - // - // For everything but block-like things we can simply take id and span of - // the given expression, meaning that from a debugger's view cleanup code is - // executed at the same source location as the statement/expr itself. - // - // Blocks are a special case. Here we want the cleanup to be linked to the - // closing curly brace of the block. The *scope* the cleanup is executed in - // is up to debate: It could either still be *within* the block being - // cleaned up, meaning that locals from the block are still visible in the - // debugger. - // Or it could be in the scope that the block is contained in, so any locals - // from within the block are already considered out-of-scope and thus not - // accessible in the debugger anymore. - // - // The current implementation opts for the second option: cleanup of a block - // already happens in the parent scope of the block. The main reason for - // this decision is that scoping becomes controlflow dependent when variable - // shadowing is involved and it's impossible to decide statically which - // scope is actually left when the cleanup code is executed. - // In practice it shouldn't make much of a difference. - - let mut cleanup_span = node_span; - - if is_block { - // Not all blocks actually have curly braces (e.g. simple closure - // bodies), in which case we also just want to return the span of the - // whole expression. - let code_snippet = cx.sess().codemap().span_to_snippet(node_span); - if let Ok(code_snippet) = code_snippet { - let bytes = code_snippet.as_bytes(); - - if !bytes.is_empty() && &bytes[bytes.len()-1..] == b"}" { - cleanup_span = Span { - lo: node_span.hi - syntax_pos::BytePos(1), - hi: node_span.hi, - expn_id: node_span.expn_id - }; - } - } - } - - NodeIdAndSpan { - id: node_id, - span: cleanup_span - } -} - +use syntax_pos::Pos; /// Sets the current debug location at the beginning of the span. /// @@ -109,9 +47,6 @@ pub fn set_source_location(fcx: &FunctionContext, let dbg_loc = if function_debug_context.source_locations_enabled.get() { let (scope, span) = match debug_loc { - DebugLoc::At(node_id, span) => { - (scope_metadata(fcx, node_id, span), span) - } DebugLoc::ScopeAt(scope, span) => (scope, span), DebugLoc::None => { set_debug_location(fcx.ccx, builder, UnknownLocation); @@ -129,35 +64,6 @@ pub fn set_source_location(fcx: &FunctionContext, set_debug_location(fcx.ccx, builder, dbg_loc); } -/// This function makes sure that all debug locations emitted while executing -/// `wrapped_function` are set to the given `debug_loc`. 
-pub fn with_source_location_override<F, R>(fcx: &FunctionContext, - debug_loc: DebugLoc, - wrapped_function: F) -> R - where F: FnOnce() -> R -{ - match fcx.debug_context { - FunctionDebugContext::DebugInfoDisabled => { - wrapped_function() - } - FunctionDebugContext::FunctionWithoutDebugInfo => { - set_debug_location(fcx.ccx, None, UnknownLocation); - wrapped_function() - } - FunctionDebugContext::RegularContext(box ref function_debug_context) => { - if function_debug_context.source_location_override.get() { - wrapped_function() - } else { - debug_loc.apply(fcx); - function_debug_context.source_location_override.set(true); - let result = wrapped_function(); - function_debug_context.source_location_override.set(false); - result - } - } - } -} - /// Enables emitting source locations for the given functions. /// /// Since we don't want source locations to be emitted for the function prelude, diff --git a/src/librustc_trans/debuginfo/type_names.rs b/src/librustc_trans/debuginfo/type_names.rs index 73b1c82866..956402edc1 100644 --- a/src/librustc_trans/debuginfo/type_names.rs +++ b/src/librustc_trans/debuginfo/type_names.rs @@ -12,7 +12,7 @@ use common::CrateContext; use rustc::hir::def_id::DefId; -use rustc::ty::subst; +use rustc::ty::subst::Substs; use rustc::ty::{self, Ty}; use rustc::hir; @@ -44,8 +44,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty::TyInt(int_ty) => output.push_str(int_ty.ty_to_string()), ty::TyUint(uint_ty) => output.push_str(uint_ty.ty_to_string()), ty::TyFloat(float_ty) => output.push_str(float_ty.ty_to_string()), - ty::TyStruct(def, substs) | - ty::TyEnum(def, substs) => { + ty::TyAdt(def, substs) => { push_item_name(cx, def.did, qualified, output); push_type_params(cx, substs, output); }, @@ -95,11 +94,12 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, output.push(']'); }, ty::TyTrait(ref trait_data) => { - let principal = cx.tcx().erase_late_bound_regions(&trait_data.principal); + let principal = cx.tcx().erase_late_bound_regions_and_normalize( + &trait_data.principal); push_item_name(cx, principal.def_id, false, output); push_type_params(cx, principal.substs, output); }, - ty::TyFnDef(_, _, &ty::BareFnTy{ unsafety, abi, ref sig } ) | + ty::TyFnDef(.., &ty::BareFnTy{ unsafety, abi, ref sig } ) | ty::TyFnPtr(&ty::BareFnTy{ unsafety, abi, ref sig } ) => { if unsafety == hir::Unsafety::Unsafe { output.push_str("unsafe "); @@ -113,8 +113,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, output.push_str("fn("); - let sig = cx.tcx().erase_late_bound_regions(sig); - let sig = cx.tcx().normalize_associated_type(&sig); + let sig = cx.tcx().erase_late_bound_regions_and_normalize(sig); if !sig.inputs.is_empty() { for &parameter_type in &sig.inputs { push_debuginfo_type_name(cx, parameter_type, true, output); @@ -173,15 +172,15 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // would be possible but with inlining and LTO we have to use the least // common denominator - otherwise we would run into conflicts.
fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - substs: &subst::Substs<'tcx>, + substs: &Substs<'tcx>, output: &mut String) { - if substs.types.is_empty() { + if substs.types().next().is_none() { return; } output.push('<'); - for &type_parameter in &substs.types { + for type_parameter in substs.types() { push_debuginfo_type_name(cx, type_parameter, true, output); output.push_str(", "); } diff --git a/src/librustc_trans/debuginfo/utils.rs b/src/librustc_trans/debuginfo/utils.rs index 5734a12394..3cdac485fe 100644 --- a/src/librustc_trans/debuginfo/utils.rs +++ b/src/librustc_trans/debuginfo/utils.rs @@ -10,7 +10,7 @@ // Utility Functions. -use super::{FunctionDebugContext, CrateDebugContext}; +use super::{CrateDebugContext}; use super::namespace::item_namespace; use rustc::hir::def_id::DefId; @@ -18,7 +18,7 @@ use rustc::hir::def_id::DefId; use llvm; use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray}; use machine; -use common::{CrateContext, FunctionContext}; +use common::{CrateContext}; use type_::Type; use syntax_pos::{self, Span}; @@ -70,13 +70,6 @@ pub fn DIB(cx: &CrateContext) -> DIBuilderRef { cx.dbg_cx().as_ref().unwrap().builder } -pub fn fn_should_be_ignored(fcx: &FunctionContext) -> bool { - match fcx.debug_context { - FunctionDebugContext::RegularContext(_) => false, - _ => true - } -} - pub fn get_namespace_and_span_for_item(cx: &CrateContext, def_id: DefId) -> (DIScope, Span) { let containing_scope = item_namespace(cx, DefId { diff --git a/src/librustc_trans/declare.rs b/src/librustc_trans/declare.rs index 4d9ee187ac..1ec5ca4a56 100644 --- a/src/librustc_trans/declare.rs +++ b/src/librustc_trans/declare.rs @@ -104,8 +104,7 @@ pub fn declare_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, fn_type: ty::Ty<'tcx>) -> ValueRef { debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type); let abi = fn_type.fn_abi(); - let sig = ccx.tcx().erase_late_bound_regions(fn_type.fn_sig()); - let sig = ccx.tcx().normalize_associated_type(&sig); + let sig = ccx.tcx().erase_late_bound_regions_and_normalize(fn_type.fn_sig()); debug!("declare_rust_fn (after region erasure) sig={:?}", sig); let fty = FnType::new(ccx, abi, &sig, &[]); @@ -164,7 +163,7 @@ pub fn define_internal_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, fn_type: ty::Ty<'tcx>) -> ValueRef { let llfn = define_fn(ccx, name, fn_type); - unsafe { llvm::LLVMSetLinkage(llfn, llvm::InternalLinkage) }; + unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) }; llfn } diff --git a/src/librustc_trans/diagnostics.rs b/src/librustc_trans/diagnostics.rs index f7f065a356..18d31448b1 100644 --- a/src/librustc_trans/diagnostics.rs +++ b/src/librustc_trans/diagnostics.rs @@ -23,8 +23,10 @@ extern "platform-intrinsic" { fn simd_add(a: T, b: T) -> T; } -unsafe { simd_add(0, 1); } -// error: invalid monomorphization of `simd_add` intrinsic +fn main() { + unsafe { simd_add(0, 1); } + // error: invalid monomorphization of `simd_add` intrinsic +} ``` The generic type has to be a SIMD type. Example: diff --git a/src/librustc_trans/expr.rs b/src/librustc_trans/expr.rs deleted file mode 100644 index 6c894ddad1..0000000000 --- a/src/librustc_trans/expr.rs +++ /dev/null @@ -1,2473 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -//! # Translation of Expressions -//! -//! The expr module handles translation of expressions. The most general -//! translation routine is `trans()`, which will translate an expression -//! into a datum. `trans_into()` is also available, which will translate -//! an expression and write the result directly into memory, sometimes -//! avoiding the need for a temporary stack slot. Finally, -//! `trans_to_lvalue()` is available if you'd like to ensure that the -//! result has cleanup scheduled. -//! -//! Internally, each of these functions dispatches to various other -//! expression functions depending on the kind of expression. We divide -//! up expressions into: -//! -//! - **Datum expressions:** Those that most naturally yield values. -//! Examples would be `22`, `box x`, or `a + b` (when not overloaded). -//! - **DPS expressions:** Those that most naturally write into a location -//! in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`. -//! - **Statement expressions:** That that do not generate a meaningful -//! result. Examples would be `while { ... }` or `return 44`. -//! -//! Public entry points: -//! -//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression, -//! storing the result into `dest`. This is the preferred form, if you -//! can manage it. -//! -//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding -//! `Datum` with the result. You can then store the datum, inspect -//! the value, etc. This may introduce temporaries if the datum is a -//! structural type. -//! -//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an -//! expression and ensures that the result has a cleanup associated with it, -//! creating a temporary stack slot if necessary. -//! -//! - `trans_var -> Datum`: looks up a local variable, upvar or static. - -#![allow(non_camel_case_types)] - -pub use self::Dest::*; -use self::lazy_binop_ty::*; - -use llvm::{self, ValueRef, TypeKind}; -use middle::const_qualif::ConstQualif; -use rustc::hir::def::Def; -use rustc::ty::subst::Substs; -use {_match, abi, adt, asm, base, closure, consts, controlflow}; -use base::*; -use build::*; -use callee::{Callee, ArgExprs, ArgOverloadedCall, ArgOverloadedOp}; -use cleanup::{self, CleanupMethods, DropHintMethods}; -use common::*; -use datum::*; -use debuginfo::{self, DebugLoc, ToDebugLoc}; -use glue; -use machine; -use tvec; -use type_of; -use value::Value; -use Disr; -use rustc::ty::adjustment::{AdjustNeverToAny, AdjustDerefRef, AdjustReifyFnPointer}; -use rustc::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer}; -use rustc::ty::adjustment::CustomCoerceUnsized; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::MethodCall; -use rustc::ty::cast::{CastKind, CastTy}; -use util::common::indenter; -use machine::{llsize_of, llsize_of_alloc}; -use type_::Type; - -use rustc::hir; - -use syntax::ast; -use syntax::parse::token::InternedString; -use syntax_pos; -use std::fmt; -use std::mem; - -// Destinations - -// These are passed around by the code generating functions to track the -// destination of a computation's value. 
- -#[derive(Copy, Clone, PartialEq)] -pub enum Dest { - SaveIn(ValueRef), - Ignore, -} - -impl fmt::Debug for Dest { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - SaveIn(v) => write!(f, "SaveIn({:?})", Value(v)), - Ignore => f.write_str("Ignore") - } - } -} - -/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate -/// better optimized LLVM code. -pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - dest: Dest) - -> Block<'blk, 'tcx> { - let mut bcx = bcx; - - expr.debug_loc().apply(bcx.fcx); - - if adjustment_required(bcx, expr) { - // use trans, which may be less efficient but - // which will perform the adjustments: - let datum = unpack_datum!(bcx, trans(bcx, expr)); - return datum.store_to_dest(bcx, dest, expr.id); - } - - let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap(); - if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) { - if !qualif.intersects(ConstQualif::PREFER_IN_PLACE) { - if let SaveIn(lldest) = dest { - match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif, - bcx.fcx.param_substs, - consts::TrueConst::No) { - Ok(global) => { - // Cast pointer to destination, because constants - // have different types. - let lldest = PointerCast(bcx, lldest, val_ty(global)); - memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr)); - return bcx; - }, - Err(consts::ConstEvalFailure::Runtime(_)) => { - // in case const evaluation errors, translate normally - // debug assertions catch the same errors - // see RFC 1229 - }, - Err(consts::ConstEvalFailure::Compiletime(_)) => { - return bcx; - }, - } - } - - // If we see a const here, that's because it evaluates to a type with zero size. We - // should be able to just discard it, since const expressions are guaranteed not to - // have side effects. This seems to be reached through tuple struct constructors being - // passed zero-size constants. - if let hir::ExprPath(..) = expr.node { - match bcx.tcx().expect_def(expr.id) { - Def::Const(_) | Def::AssociatedConst(_) => { - assert!(type_is_zero_size(bcx.ccx(), bcx.tcx().node_id_to_type(expr.id))); - return bcx; - } - _ => {} - } - } - - // Even if we don't have a value to emit, and the expression - // doesn't have any side-effects, we still have to translate the - // body of any closures. - // FIXME: Find a better way of handling this case. - } else { - // The only way we're going to see a `const` at this point is if - // it prefers in-place instantiation, likely because it contains - // `[x; N]` somewhere within. - match expr.node { - hir::ExprPath(..) => { - match bcx.tcx().expect_def(expr.id) { - Def::Const(did) | Def::AssociatedConst(did) => { - let empty_substs = bcx.tcx().mk_substs(Substs::empty()); - let const_expr = consts::get_const_expr(bcx.ccx(), did, expr, - empty_substs); - // Temporarily get cleanup scopes out of the way, - // as they require sub-expressions to be contained - // inside the current AST scope. - // These should record no cleanups anyways, `const` - // can't have destructors. - let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(), - vec![]); - // Lock emitted debug locations to the location of - // the constant reference expression. 
- debuginfo::with_source_location_override(bcx.fcx, - expr.debug_loc(), - || { - bcx = trans_into(bcx, const_expr, dest) - }); - let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(), - scopes); - assert!(scopes.is_empty()); - return bcx; - } - _ => {} - } - } - _ => {} - } - } - } - - debug!("trans_into() expr={:?}", expr); - - let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), - expr.id, - expr.span, - false); - bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc); - - let kind = expr_kind(bcx.tcx(), expr); - bcx = match kind { - ExprKind::Lvalue | ExprKind::RvalueDatum => { - trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id) - } - ExprKind::RvalueDps => { - trans_rvalue_dps_unadjusted(bcx, expr, dest) - } - ExprKind::RvalueStmt => { - trans_rvalue_stmt_unadjusted(bcx, expr) - } - }; - - bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id) -} - -/// Translates an expression, returning a datum (and new block) encapsulating the result. When -/// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the -/// stack. -pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - debug!("trans(expr={:?})", expr); - - let mut bcx = bcx; - let fcx = bcx.fcx; - let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap(); - let adjusted_global = !qualif.intersects(ConstQualif::NON_STATIC_BORROWS); - let global = if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) { - match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif, - bcx.fcx.param_substs, - consts::TrueConst::No) { - Ok(global) => { - if qualif.intersects(ConstQualif::HAS_STATIC_BORROWS) { - // Is borrowed as 'static, must return lvalue. - - // Cast pointer to global, because constants have different types. - let const_ty = expr_ty_adjusted(bcx, expr); - let llty = type_of::type_of(bcx.ccx(), const_ty); - let global = PointerCast(bcx, global, llty.ptr_to()); - let datum = Datum::new(global, const_ty, Lvalue::new("expr::trans")); - return DatumBlock::new(bcx, datum.to_expr_datum()); - } - - // Otherwise, keep around and perform adjustments, if needed. - let const_ty = if adjusted_global { - expr_ty_adjusted(bcx, expr) - } else { - expr_ty(bcx, expr) - }; - - // This could use a better heuristic. - Some(if type_is_immediate(bcx.ccx(), const_ty) { - // Cast pointer to global, because constants have different types. - let llty = type_of::type_of(bcx.ccx(), const_ty); - let global = PointerCast(bcx, global, llty.ptr_to()); - // Maybe just get the value directly, instead of loading it? - immediate_rvalue(load_ty(bcx, global, const_ty), const_ty) - } else { - let scratch = alloc_ty(bcx, const_ty, "const"); - call_lifetime_start(bcx, scratch); - let lldest = if !const_ty.is_structural() { - // Cast pointer to slot, because constants have different types. - PointerCast(bcx, scratch, val_ty(global)) - } else { - // In this case, memcpy_ty calls llvm.memcpy after casting both - // source and destination to i8*, so we don't need any casts. 
- scratch - }; - memcpy_ty(bcx, lldest, global, const_ty); - Datum::new(scratch, const_ty, Rvalue::new(ByRef)) - }) - }, - Err(consts::ConstEvalFailure::Runtime(_)) => { - // in case const evaluation errors, translate normally - // debug assertions catch the same errors - // see RFC 1229 - None - }, - Err(consts::ConstEvalFailure::Compiletime(_)) => { - // generate a dummy llvm value - let const_ty = expr_ty(bcx, expr); - let llty = type_of::type_of(bcx.ccx(), const_ty); - let dummy = C_undef(llty.ptr_to()); - Some(Datum::new(dummy, const_ty, Rvalue::new(ByRef))) - }, - } - } else { - None - }; - - let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), - expr.id, - expr.span, - false); - fcx.push_ast_cleanup_scope(cleanup_debug_loc); - let datum = match global { - Some(rvalue) => rvalue.to_expr_datum(), - None => unpack_datum!(bcx, trans_unadjusted(bcx, expr)) - }; - let datum = if adjusted_global { - datum // trans::consts already performed adjustments. - } else { - unpack_datum!(bcx, apply_adjustments(bcx, expr, datum)) - }; - bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id); - return DatumBlock::new(bcx, datum); -} - -pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef { - StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA) -} - -pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef { - StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR) -} - -pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) { - Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr)); - Store(bcx, Load(bcx, get_meta(bcx, src_ptr)), get_meta(bcx, dst_ptr)); -} - -fn adjustment_required<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr) -> bool { - let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() { - None => { return false; } - Some(adj) => adj - }; - - // Don't skip a conversion from Box to &T, etc. - if bcx.tcx().is_overloaded_autoderef(expr.id, 0) { - return true; - } - - match adjustment { - AdjustNeverToAny(..) => true, - AdjustReifyFnPointer => true, - AdjustUnsafeFnPointer | AdjustMutToConstPointer => { - // purely a type-level thing - false - } - AdjustDerefRef(ref adj) => { - // We are a bit paranoid about adjustments and thus might have a re- - // borrow here which merely derefs and then refs again (it might have - // a different region or mutability, but we don't care here). - !(adj.autoderefs == 1 && adj.autoref.is_some() && adj.unsize.is_none()) - } - } -} - -/// Helper for trans that apply adjustments from `expr` to `datum`, which should be the unadjusted -/// translation of `expr`. 
-fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - datum: Datum<'tcx, Expr>) - -> DatumBlock<'blk, 'tcx, Expr> -{ - let mut bcx = bcx; - let mut datum = datum; - let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() { - None => { - return DatumBlock::new(bcx, datum); - } - Some(adj) => { adj } - }; - debug!("unadjusted datum for expr {:?}: {:?} adjustment={:?}", - expr, datum, adjustment); - match adjustment { - AdjustNeverToAny(ref target) => { - let mono_target = bcx.monomorphize(target); - let llty = type_of::type_of(bcx.ccx(), mono_target); - let dummy = C_undef(llty.ptr_to()); - datum = Datum::new(dummy, mono_target, Lvalue::new("never")).to_expr_datum(); - } - AdjustReifyFnPointer => { - match datum.ty.sty { - ty::TyFnDef(def_id, substs, _) => { - datum = Callee::def(bcx.ccx(), def_id, substs) - .reify(bcx.ccx()).to_expr_datum(); - } - _ => { - bug!("{} cannot be reified to a fn ptr", datum.ty) - } - } - } - AdjustUnsafeFnPointer | AdjustMutToConstPointer => { - // purely a type-level thing - } - AdjustDerefRef(ref adj) => { - let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() { - // We are a bit paranoid about adjustments and thus might have a re- - // borrow here which merely derefs and then refs again (it might have - // a different region or mutability, but we don't care here). - match datum.ty.sty { - // Don't skip a conversion from Box to &T, etc. - ty::TyRef(..) => { - if bcx.tcx().is_overloaded_autoderef(expr.id, 0) { - // Don't skip an overloaded deref. - 0 - } else { - 1 - } - } - _ => 0 - } - } else { - 0 - }; - - if adj.autoderefs > skip_reborrows { - // Schedule cleanup. - let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id)); - datum = unpack_datum!(bcx, deref_multiple(bcx, expr, - lval.to_expr_datum(), - adj.autoderefs - skip_reborrows)); - } - - // (You might think there is a more elegant way to do this than a - // skip_reborrows bool, but then you remember that the borrow checker exists). - if skip_reborrows == 0 && adj.autoref.is_some() { - datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr)); - } - - if let Some(target) = adj.unsize { - // We do not arrange cleanup ourselves; if we already are an - // L-value, then cleanup will have already been scheduled (and - // the `datum.to_rvalue_datum` call below will emit code to zero - // the drop flag when moving out of the L-value). If we are an - // R-value, then we do not need to schedule cleanup. - let source_datum = unpack_datum!(bcx, - datum.to_rvalue_datum(bcx, "__coerce_source")); - - let target = bcx.monomorphize(&target); - - let scratch = alloc_ty(bcx, target, "__coerce_target"); - call_lifetime_start(bcx, scratch); - let target_datum = Datum::new(scratch, target, - Rvalue::new(ByRef)); - bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum); - datum = Datum::new(scratch, target, - RvalueExpr(Rvalue::new(ByRef))); - } - } - } - debug!("after adjustments, datum={:?}", datum); - DatumBlock::new(bcx, datum) -} - -fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - span: syntax_pos::Span, - source: Datum<'tcx, Rvalue>, - target: Datum<'tcx, Rvalue>) - -> Block<'blk, 'tcx> { - let mut bcx = bcx; - debug!("coerce_unsized({:?} -> {:?})", source, target); - - match (&source.ty.sty, &target.ty.sty) { - (&ty::TyBox(a), &ty::TyBox(b)) | - (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }), - &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) | - (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. 
}), - &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) | - (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }), - &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => { - let (inner_source, inner_target) = (a, b); - - let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) { - // Normally, the source is a thin pointer and we are - // adding extra info to make a fat pointer. The exception - // is when we are upcasting an existing object fat pointer - // to use a different vtable. In that case, we want to - // load out the original data pointer so we can repackage - // it. - (Load(bcx, get_dataptr(bcx, source.val)), - Some(Load(bcx, get_meta(bcx, source.val)))) - } else { - let val = if source.kind.is_by_ref() { - load_ty(bcx, source.val, source.ty) - } else { - source.val - }; - (val, None) - }; - - let info = unsized_info(bcx.ccx(), inner_source, inner_target, old_info); - - // Compute the base pointer. This doesn't change the pointer value, - // but merely its type. - let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to(); - let base = PointerCast(bcx, base, ptr_ty); - - Store(bcx, base, get_dataptr(bcx, target.val)); - Store(bcx, info, get_meta(bcx, target.val)); - } - - // This can be extended to enums and tuples in the future. - // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) | - (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => { - assert_eq!(def_id_a, def_id_b); - - // The target is already by-ref because it's to be written to. - let source = unpack_datum!(bcx, source.to_ref_datum(bcx)); - assert!(target.kind.is_by_ref()); - - let kind = custom_coerce_unsize_info(bcx.ccx().shared(), - source.ty, - target.ty); - - let repr_source = adt::represent_type(bcx.ccx(), source.ty); - let src_fields = match &*repr_source { - &adt::Repr::Univariant(ref s, _) => &s.fields, - _ => span_bug!(span, - "Non univariant struct? (repr_source: {:?})", - repr_source), - }; - let repr_target = adt::represent_type(bcx.ccx(), target.ty); - let target_fields = match &*repr_target { - &adt::Repr::Univariant(ref s, _) => &s.fields, - _ => span_bug!(span, - "Non univariant struct? (repr_target: {:?})", - repr_target), - }; - - let coerce_index = match kind { - CustomCoerceUnsized::Struct(i) => i - }; - assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len()); - - let source_val = adt::MaybeSizedValue::sized(source.val); - let target_val = adt::MaybeSizedValue::sized(target.val); - - let iter = src_fields.iter().zip(target_fields).enumerate(); - for (i, (src_ty, target_ty)) in iter { - let ll_source = adt::trans_field_ptr(bcx, &repr_source, source_val, Disr(0), i); - let ll_target = adt::trans_field_ptr(bcx, &repr_target, target_val, Disr(0), i); - - // If this is the field we need to coerce, recurse on it. - if i == coerce_index { - coerce_unsized(bcx, span, - Datum::new(ll_source, src_ty, - Rvalue::new(ByRef)), - Datum::new(ll_target, target_ty, - Rvalue::new(ByRef))); - } else { - // Otherwise, simply copy the data from the source. - assert!(src_ty.is_phantom_data() || src_ty == target_ty); - memcpy_ty(bcx, ll_target, ll_source, src_ty); - } - } - } - _ => bug!("coerce_unsized: invalid coercion {:?} -> {:?}", - source.ty, - target.ty) - } - bcx -} - -/// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory -/// that the expr represents. -/// -/// If this expression is an rvalue, this implies introducing a temporary. 
In other words, -/// something like `x().f` is translated into roughly the equivalent of -/// -/// { tmp = x(); tmp.f } -pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - name: &str) - -> DatumBlock<'blk, 'tcx, Lvalue> { - let mut bcx = bcx; - let datum = unpack_datum!(bcx, trans(bcx, expr)); - return datum.to_lvalue_datum(bcx, name, expr.id); -} - -/// A version of `trans` that ignores adjustments. You almost certainly do not want to call this -/// directly. -fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; - - debug!("trans_unadjusted(expr={:?})", expr); - let _indenter = indenter(); - - expr.debug_loc().apply(bcx.fcx); - - return match expr_kind(bcx.tcx(), expr) { - ExprKind::Lvalue | ExprKind::RvalueDatum => { - let datum = unpack_datum!(bcx, { - trans_datum_unadjusted(bcx, expr) - }); - - DatumBlock {bcx: bcx, datum: datum} - } - - ExprKind::RvalueStmt => { - bcx = trans_rvalue_stmt_unadjusted(bcx, expr); - nil(bcx, expr_ty(bcx, expr)) - } - - ExprKind::RvalueDps => { - let ty = expr_ty(bcx, expr); - if type_is_zero_size(bcx.ccx(), ty) { - bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore); - nil(bcx, ty) - } else { - let scratch = rvalue_scratch_datum(bcx, ty, ""); - bcx = trans_rvalue_dps_unadjusted( - bcx, expr, SaveIn(scratch.val)); - - // Note: this is not obviously a good idea. It causes - // immediate values to be loaded immediately after a - // return from a call or other similar expression, - // which in turn leads to alloca's having shorter - // lifetimes and hence larger stack frames. However, - // in turn it can lead to more register pressure. - // Still, in practice it seems to increase - // performance, since we have fewer problems with - // morestack churn. - let scratch = unpack_datum!( - bcx, scratch.to_appropriate_datum(bcx)); - - DatumBlock::new(bcx, scratch.to_expr_datum()) - } - } - }; - - fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>) - -> DatumBlock<'blk, 'tcx, Expr> { - let llval = C_undef(type_of::type_of(bcx.ccx(), ty)); - let datum = immediate_rvalue(llval, ty); - DatumBlock::new(bcx, datum.to_expr_datum()) - } -} - -fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; - let fcx = bcx.fcx; - let _icx = push_ctxt("trans_datum_unadjusted"); - - match expr.node { - hir::ExprType(ref e, _) => { - trans(bcx, &e) - } - hir::ExprPath(..) => { - let var = trans_var(bcx, bcx.tcx().expect_def(expr.id)); - DatumBlock::new(bcx, var.to_expr_datum()) - } - hir::ExprField(ref base, name) => { - trans_rec_field(bcx, &base, name.node) - } - hir::ExprTupField(ref base, idx) => { - trans_rec_tup_field(bcx, &base, idx.node) - } - hir::ExprIndex(ref base, ref idx) => { - trans_index(bcx, expr, &base, &idx, MethodCall::expr(expr.id)) - } - hir::ExprBox(ref contents) => { - // Special case for `Box` - let box_ty = expr_ty(bcx, expr); - let contents_ty = expr_ty(bcx, &contents); - match box_ty.sty { - ty::TyBox(..) => { - trans_uniq_expr(bcx, expr, box_ty, &contents, contents_ty) - } - _ => span_bug!(expr.span, - "expected unique box") - } - - } - hir::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &lit), - hir::ExprBinary(op, ref lhs, ref rhs) => { - trans_binary(bcx, expr, op, &lhs, &rhs) - } - hir::ExprUnary(op, ref x) => { - trans_unary(bcx, expr, op, &x) - } - hir::ExprAddrOf(_, ref x) => { - match x.node { - hir::ExprRepeat(..) | hir::ExprVec(..) 
=> { - // Special case for slices. - let cleanup_debug_loc = - debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), - x.id, - x.span, - false); - fcx.push_ast_cleanup_scope(cleanup_debug_loc); - let datum = unpack_datum!( - bcx, tvec::trans_slice_vec(bcx, expr, &x)); - bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id); - DatumBlock::new(bcx, datum) - } - _ => { - trans_addr_of(bcx, expr, &x) - } - } - } - hir::ExprCast(ref val, _) => { - // Datum output mode means this is a scalar cast: - trans_imm_cast(bcx, &val, expr.id) - } - _ => { - span_bug!( - expr.span, - "trans_rvalue_datum_unadjusted reached \ - fall-through case: {:?}", - expr.node); - } - } -} - -fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - base: &hir::Expr, - get_idx: F) - -> DatumBlock<'blk, 'tcx, Expr> where - F: FnOnce(TyCtxt<'blk, 'tcx, 'tcx>, &VariantInfo<'tcx>) -> usize, -{ - let mut bcx = bcx; - let _icx = push_ctxt("trans_rec_field"); - - let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field")); - let bare_ty = base_datum.ty; - let repr = adt::represent_type(bcx.ccx(), bare_ty); - let vinfo = VariantInfo::from_ty(bcx.tcx(), bare_ty, None); - - let ix = get_idx(bcx.tcx(), &vinfo); - let d = base_datum.get_element( - bcx, - vinfo.fields[ix].1, - |srcval| { - adt::trans_field_ptr(bcx, &repr, srcval, vinfo.discr, ix) - }); - - if type_is_sized(bcx.tcx(), d.ty) { - DatumBlock { datum: d.to_expr_datum(), bcx: bcx } - } else { - let scratch = rvalue_scratch_datum(bcx, d.ty, ""); - Store(bcx, d.val, get_dataptr(bcx, scratch.val)); - let info = Load(bcx, get_meta(bcx, base_datum.val)); - Store(bcx, info, get_meta(bcx, scratch.val)); - - // Always generate an lvalue datum, because this pointer doesn't own - // the data and cleanup is scheduled elsewhere. - DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr(d.kind))) - } -} - -/// Translates `base.field`. -fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - base: &hir::Expr, - field: ast::Name) - -> DatumBlock<'blk, 'tcx, Expr> { - trans_field(bcx, base, |_, vinfo| vinfo.field_index(field)) -} - -/// Translates `base.`. -fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - base: &hir::Expr, - idx: usize) - -> DatumBlock<'blk, 'tcx, Expr> { - trans_field(bcx, base, |_, _| idx) -} - -fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - index_expr: &hir::Expr, - base: &hir::Expr, - idx: &hir::Expr, - method_call: MethodCall) - -> DatumBlock<'blk, 'tcx, Expr> { - //! Translates `base[idx]`. - - let _icx = push_ctxt("trans_index"); - let ccx = bcx.ccx(); - let mut bcx = bcx; - - let index_expr_debug_loc = index_expr.debug_loc(); - - // Check for overloaded index. - let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned(); - let elt_datum = match method { - Some(method) => { - let method_ty = monomorphize_type(bcx, method.ty); - - let base_datum = unpack_datum!(bcx, trans(bcx, base)); - - // Translate index expression. - let ix_datum = unpack_datum!(bcx, trans(bcx, idx)); - - let ref_ty = // invoked methods have LB regions instantiated: - bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap(); - let elt_ty = match ref_ty.builtin_deref(true, ty::NoPreference) { - None => { - span_bug!(index_expr.span, - "index method didn't return a \ - dereferenceable type?!") - } - Some(elt_tm) => elt_tm.ty, - }; - - // Overloaded. Invoke the index() method, which basically - // yields a `&T` pointer. We can then proceed down the - // normal path (below) to dereference that `&T`. 
- let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt"); - - bcx = Callee::method(bcx, method) - .call(bcx, index_expr_debug_loc, - ArgOverloadedOp(base_datum, Some(ix_datum)), - Some(SaveIn(scratch.val))).bcx; - - let datum = scratch.to_expr_datum(); - let lval = Lvalue::new("expr::trans_index overload"); - if type_is_sized(bcx.tcx(), elt_ty) { - Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr(lval)) - } else { - Datum::new(datum.val, elt_ty, LvalueExpr(lval)) - } - } - None => { - let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, - base, - "index")); - - // Translate index expression and cast to a suitable LLVM integer. - // Rust is less strict than LLVM in this regard. - let ix_datum = unpack_datum!(bcx, trans(bcx, idx)); - let ix_val = ix_datum.to_llscalarish(bcx); - let ix_size = machine::llbitsize_of_real(bcx.ccx(), - val_ty(ix_val)); - let int_size = machine::llbitsize_of_real(bcx.ccx(), - ccx.int_type()); - let ix_val = { - if ix_size < int_size { - if expr_ty(bcx, idx).is_signed() { - SExt(bcx, ix_val, ccx.int_type()) - } else { ZExt(bcx, ix_val, ccx.int_type()) } - } else if ix_size > int_size { - Trunc(bcx, ix_val, ccx.int_type()) - } else { - ix_val - } - }; - - let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx()); - - let (base, len) = base_datum.get_vec_base_and_len(bcx); - - debug!("trans_index: base {:?}", Value(base)); - debug!("trans_index: len {:?}", Value(len)); - - let bounds_check = ICmp(bcx, - llvm::IntUGE, - ix_val, - len, - index_expr_debug_loc); - let expect = ccx.get_intrinsic(&("llvm.expect.i1")); - let expected = Call(bcx, - expect, - &[bounds_check, C_bool(ccx, false)], - index_expr_debug_loc); - bcx = with_cond(bcx, expected, |bcx| { - controlflow::trans_fail_bounds_check(bcx, - expr_info(index_expr), - ix_val, - len) - }); - let elt = InBoundsGEP(bcx, base, &[ix_val]); - let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to()); - let lval = Lvalue::new("expr::trans_index fallback"); - Datum::new(elt, unit_ty, LvalueExpr(lval)) - } - }; - - DatumBlock::new(bcx, elt_datum) -} - -/// Translates a reference to a variable. -pub fn trans_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, def: Def) - -> Datum<'tcx, Lvalue> { - - match def { - Def::Static(did, _) => consts::get_static(bcx.ccx(), did), - Def::Upvar(_, nid, _, _) => { - // Can't move upvars, so this is never a ZeroMemLastUse. 
- let local_ty = node_id_type(bcx, nid); - let lval = Lvalue::new_with_hint("expr::trans_var (upvar)", - bcx, nid, HintKind::ZeroAndMaintain); - match bcx.fcx.llupvars.borrow().get(&nid) { - Some(&val) => Datum::new(val, local_ty, lval), - None => { - bug!("trans_var: no llval for upvar {} found", nid); - } - } - } - Def::Local(_, nid) => { - let datum = match bcx.fcx.lllocals.borrow().get(&nid) { - Some(&v) => v, - None => { - bug!("trans_var: no datum for local/arg {} found", nid); - } - }; - debug!("take_local(nid={}, v={:?}, ty={})", - nid, Value(datum.val), datum.ty); - datum - } - _ => bug!("{:?} should not reach expr::trans_var", def) - } -} - -fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr) - -> Block<'blk, 'tcx> { - let mut bcx = bcx; - let _icx = push_ctxt("trans_rvalue_stmt"); - - if bcx.unreachable.get() { - return bcx; - } - - expr.debug_loc().apply(bcx.fcx); - - match expr.node { - hir::ExprBreak(label_opt) => { - controlflow::trans_break(bcx, expr, label_opt.map(|l| l.node)) - } - hir::ExprType(ref e, _) => { - trans_into(bcx, &e, Ignore) - } - hir::ExprAgain(label_opt) => { - controlflow::trans_cont(bcx, expr, label_opt.map(|l| l.node)) - } - hir::ExprRet(ref ex) => { - // Check to see if the return expression itself is reachable. - // This can occur when the inner expression contains a return - let reachable = if let Some(ref cfg) = bcx.fcx.cfg { - cfg.node_is_reachable(expr.id) - } else { - true - }; - - if reachable { - controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e)) - } else { - // If it's not reachable, just translate the inner expression - // directly. This avoids having to manage a return slot when - // it won't actually be used anyway. - if let &Some(ref x) = ex { - bcx = trans_into(bcx, &x, Ignore); - } - // Mark the end of the block as unreachable. Once we get to - // a return expression, there's no more we should be doing - // after this. - Unreachable(bcx); - bcx - } - } - hir::ExprWhile(ref cond, ref body, _) => { - controlflow::trans_while(bcx, expr, &cond, &body) - } - hir::ExprLoop(ref body, _) => { - controlflow::trans_loop(bcx, expr, &body) - } - hir::ExprAssign(ref dst, ref src) => { - let src_datum = unpack_datum!(bcx, trans(bcx, &src)); - let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &dst, "assign")); - - if bcx.fcx.type_needs_drop(dst_datum.ty) { - // If there are destructors involved, make sure we - // are copying from an rvalue, since that cannot possible - // alias an lvalue. We are concerned about code like: - // - // a = a - // - // but also - // - // a = a.b - // - // where e.g. a : Option and a.b : - // Option. In that case, freeing `a` before the - // assignment may also free `a.b`! - // - // We could avoid this intermediary with some analysis - // to determine whether `dst` may possibly own `src`. - expr.debug_loc().apply(bcx.fcx); - let src_datum = unpack_datum!( - bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign")); - let opt_hint_datum = dst_datum.kind.drop_flag_info.hint_datum(bcx); - let opt_hint_val = opt_hint_datum.map(|d|d.to_value()); - - // 1. Drop the data at the destination, passing the - // drop-hint in case the lvalue has already been - // dropped or moved. - bcx = glue::drop_ty_core(bcx, - dst_datum.val, - dst_datum.ty, - expr.debug_loc(), - false, - opt_hint_val); - - // 2. We are overwriting the destination; ensure that - // its drop-hint (if any) says "initialized." 
- if let Some(hint_val) = opt_hint_val { - let hint_llval = hint_val.value(); - let drop_needed = C_u8(bcx.fcx.ccx, adt::DTOR_NEEDED_HINT); - Store(bcx, drop_needed, hint_llval); - } - src_datum.store_to(bcx, dst_datum.val) - } else { - src_datum.store_to(bcx, dst_datum.val) - } - } - hir::ExprAssignOp(op, ref dst, ref src) => { - let method = bcx.tcx().tables - .borrow() - .method_map - .get(&MethodCall::expr(expr.id)).cloned(); - - if let Some(method) = method { - let dst = unpack_datum!(bcx, trans(bcx, &dst)); - let src_datum = unpack_datum!(bcx, trans(bcx, &src)); - - Callee::method(bcx, method) - .call(bcx, expr.debug_loc(), - ArgOverloadedOp(dst, Some(src_datum)), None).bcx - } else { - trans_assign_op(bcx, expr, op, &dst, &src) - } - } - hir::ExprInlineAsm(ref a, ref outputs, ref inputs) => { - let outputs = outputs.iter().map(|output| { - let out_datum = unpack_datum!(bcx, trans(bcx, output)); - unpack_datum!(bcx, out_datum.to_lvalue_datum(bcx, "out", expr.id)) - }).collect(); - let inputs = inputs.iter().map(|input| { - let input = unpack_datum!(bcx, trans(bcx, input)); - let input = unpack_datum!(bcx, input.to_rvalue_datum(bcx, "in")); - input.to_llscalarish(bcx) - }).collect(); - asm::trans_inline_asm(bcx, a, outputs, inputs); - bcx - } - _ => { - span_bug!( - expr.span, - "trans_rvalue_stmt_unadjusted reached \ - fall-through case: {:?}", - expr.node); - } - } -} - -fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - dest: Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_rvalue_dps_unadjusted"); - let mut bcx = bcx; - - expr.debug_loc().apply(bcx.fcx); - - // Entry into the method table if this is an overloaded call/op. - let method_call = MethodCall::expr(expr.id); - - match expr.node { - hir::ExprType(ref e, _) => { - trans_into(bcx, &e, dest) - } - hir::ExprPath(..) => { - trans_def_dps_unadjusted(bcx, expr, bcx.tcx().expect_def(expr.id), dest) - } - hir::ExprIf(ref cond, ref thn, ref els) => { - controlflow::trans_if(bcx, expr.id, &cond, &thn, els.as_ref().map(|e| &**e), dest) - } - hir::ExprMatch(ref discr, ref arms, _) => { - _match::trans_match(bcx, expr, &discr, &arms[..], dest) - } - hir::ExprBlock(ref blk) => { - controlflow::trans_block(bcx, &blk, dest) - } - hir::ExprStruct(_, ref fields, ref base) => { - trans_struct(bcx, - &fields[..], - base.as_ref().map(|e| &**e), - expr.span, - expr.id, - node_id_type(bcx, expr.id), - dest) - } - hir::ExprTup(ref args) => { - let numbered_fields: Vec<(usize, &hir::Expr)> = - args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect(); - trans_adt(bcx, - expr_ty(bcx, expr), - Disr(0), - &numbered_fields[..], - None, - dest, - expr.debug_loc()) - } - hir::ExprLit(ref lit) => { - match lit.node { - ast::LitKind::Str(ref s, _) => { - tvec::trans_lit_str(bcx, expr, (*s).clone(), dest) - } - _ => { - span_bug!(expr.span, - "trans_rvalue_dps_unadjusted shouldn't be \ - translating this type of literal") - } - } - } - hir::ExprVec(..) | hir::ExprRepeat(..) => { - tvec::trans_fixed_vstore(bcx, expr, dest) - } - hir::ExprClosure(_, ref decl, ref body, _) => { - let dest = match dest { - SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest), - Ignore => closure::Dest::Ignore(bcx.ccx()) - }; - - // NB. To get the id of the closure, we don't use - // `local_def_id(id)`, but rather we extract the closure - // def-id from the expr's type. 
This is because this may - // be an inlined expression from another crate, and we - // want to get the ORIGINAL closure def-id, since that is - // the key we need to find the closure-kind and - // closure-type etc. - let (def_id, substs) = match expr_ty(bcx, expr).sty { - ty::TyClosure(def_id, substs) => (def_id, substs), - ref t => - span_bug!( - expr.span, - "closure expr without closure type: {:?}", t), - }; - - closure::trans_closure_expr(dest, - decl, - body, - expr.id, - def_id, - substs).unwrap_or(bcx) - } - hir::ExprCall(ref f, ref args) => { - let method = bcx.tcx().tables.borrow().method_map.get(&method_call).cloned(); - let (callee, args) = if let Some(method) = method { - let mut all_args = vec![&**f]; - all_args.extend(args.iter().map(|e| &**e)); - - (Callee::method(bcx, method), ArgOverloadedCall(all_args)) - } else { - let f = unpack_datum!(bcx, trans(bcx, f)); - (match f.ty.sty { - ty::TyFnDef(def_id, substs, _) => { - Callee::def(bcx.ccx(), def_id, substs) - } - ty::TyFnPtr(_) => { - let f = unpack_datum!(bcx, - f.to_rvalue_datum(bcx, "callee")); - Callee::ptr(f) - } - _ => { - span_bug!(expr.span, - "type of callee is not a fn: {}", f.ty); - } - }, ArgExprs(&args)) - }; - callee.call(bcx, expr.debug_loc(), args, Some(dest)).bcx - } - hir::ExprMethodCall(_, _, ref args) => { - Callee::method_call(bcx, method_call) - .call(bcx, expr.debug_loc(), ArgExprs(&args), Some(dest)).bcx - } - hir::ExprBinary(op, ref lhs, ref rhs_expr) => { - // if not overloaded, would be RvalueDatumExpr - let lhs = unpack_datum!(bcx, trans(bcx, &lhs)); - let mut rhs = unpack_datum!(bcx, trans(bcx, &rhs_expr)); - if !op.node.is_by_value() { - rhs = unpack_datum!(bcx, auto_ref(bcx, rhs, rhs_expr)); - } - - Callee::method_call(bcx, method_call) - .call(bcx, expr.debug_loc(), - ArgOverloadedOp(lhs, Some(rhs)), Some(dest)).bcx - } - hir::ExprUnary(_, ref subexpr) => { - // if not overloaded, would be RvalueDatumExpr - let arg = unpack_datum!(bcx, trans(bcx, &subexpr)); - - Callee::method_call(bcx, method_call) - .call(bcx, expr.debug_loc(), - ArgOverloadedOp(arg, None), Some(dest)).bcx - } - hir::ExprCast(..) => { - // Trait casts used to come this way, now they should be coercions. - span_bug!(expr.span, "DPS expr_cast (residual trait cast?)") - } - hir::ExprAssignOp(op, _, _) => { - span_bug!( - expr.span, - "augmented assignment `{}=` should always be a rvalue_stmt", - op.node.as_str()) - } - _ => { - span_bug!( - expr.span, - "trans_rvalue_dps_unadjusted reached fall-through \ - case: {:?}", - expr.node); - } - } -} - -fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ref_expr: &hir::Expr, - def: Def, - dest: Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_def_dps_unadjusted"); - - let lldest = match dest { - SaveIn(lldest) => lldest, - Ignore => { return bcx; } - }; - - let ty = expr_ty(bcx, ref_expr); - if let ty::TyFnDef(..) = ty.sty { - // Zero-sized function or ctor. - return bcx; - } - - match def { - Def::Variant(tid, vid) => { - let variant = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid); - // Nullary variant. - let ty = expr_ty(bcx, ref_expr); - let repr = adt::represent_type(bcx.ccx(), ty); - adt::trans_set_discr(bcx, &repr, lldest, Disr::from(variant.disr_val)); - bcx - } - Def::Struct(..) 
=> {
-            match ty.sty {
-                ty::TyStruct(def, _) if def.has_dtor() => {
-                    let repr = adt::represent_type(bcx.ccx(), ty);
-                    adt::trans_set_discr(bcx, &repr, lldest, Disr(0));
-                }
-                _ => {}
-            }
-            bcx
-        }
-        _ => {
-            span_bug!(ref_expr.span,
-                      "Non-DPS def {:?} referened by {}",
-                      def, bcx.node_id_to_string(ref_expr.id));
-        }
-    }
-}
-
-fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-                            fields: &[hir::Field],
-                            base: Option<&hir::Expr>,
-                            expr_span: syntax_pos::Span,
-                            expr_id: ast::NodeId,
-                            ty: Ty<'tcx>,
-                            dest: Dest) -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_rec");
-
-    let tcx = bcx.tcx();
-    let vinfo = VariantInfo::of_node(tcx, ty, expr_id);
-
-    let mut need_base = vec![true; vinfo.fields.len()];
-
-    let numbered_fields = fields.iter().map(|field| {
-        let pos = vinfo.field_index(field.name.node);
-        need_base[pos] = false;
-        (pos, &*field.expr)
-    }).collect::<Vec<_>>();
-
-    let optbase = match base {
-        Some(base_expr) => {
-            let mut leftovers = Vec::new();
-            for (i, b) in need_base.iter().enumerate() {
-                if *b {
-                    leftovers.push((i, vinfo.fields[i].1));
-                }
-            }
-            Some(StructBaseInfo {expr: base_expr,
-                                 fields: leftovers })
-        }
-        None => {
-            if need_base.iter().any(|b| *b) {
-                span_bug!(expr_span, "missing fields and no base expr")
-            }
-            None
-        }
-    };
-
-    trans_adt(bcx,
-              ty,
-              vinfo.discr,
-              &numbered_fields,
-              optbase,
-              dest,
-              DebugLoc::At(expr_id, expr_span))
-}
-
-/// Information that `trans_adt` needs in order to fill in the fields
-/// of a struct copied from a base struct (e.g., from an expression
-/// like `Foo { a: b, ..base }`.
-///
-/// Note that `fields` may be empty; the base expression must always be
-/// evaluated for side-effects.
-pub struct StructBaseInfo<'a, 'tcx> {
-    /// The base expression; will be evaluated after all explicit fields.
-    expr: &'a hir::Expr,
-    /// The indices of fields to copy paired with their types.
-    fields: Vec<(usize, Ty<'tcx>)>
-}
-
-/// Constructs an ADT instance:
-///
-/// - `fields` should be a list of field indices paired with the
-///   expression to store into that field. The initializers will be
-///   evaluated in the order specified by `fields`.
-///
-/// - `optbase` contains information on the base struct (if any) from
-///   which remaining fields are copied; see comments on `StructBaseInfo`.
-pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
-                                 ty: Ty<'tcx>,
-                                 discr: Disr,
-                                 fields: &[(usize, &hir::Expr)],
-                                 optbase: Option<StructBaseInfo<'a, 'tcx>>,
-                                 dest: Dest,
-                                 debug_location: DebugLoc)
-                                 -> Block<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_adt");
-    let fcx = bcx.fcx;
-    let repr = adt::represent_type(bcx.ccx(), ty);
-
-    debug_location.apply(bcx.fcx);
-
-    // If we don't care about the result, just make a
-    // temporary stack slot
-    let addr = match dest {
-        SaveIn(pos) => pos,
-        Ignore => {
-            let llresult = alloc_ty(bcx, ty, "temp");
-            call_lifetime_start(bcx, llresult);
-            llresult
-        }
-    };
-
-    debug!("trans_adt");
-
-    // This scope holds intermediates that must be cleaned should
-    // panic occur before the ADT as a whole is ready.
-    let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
-
-    if ty.is_simd() {
-        // Issue 23112: The original logic appeared vulnerable to same
-        // order-of-eval bug. But, SIMD values are tuple-structs;
-        // i.e. functional record update (FRU) syntax is unavailable.
-        //
-        // To be safe, double-check that we did not get here via FRU.
-        assert!(optbase.is_none());
-
-        // This is the constructor of a SIMD type, such types are
-        // always primitive machine types and so do not have a
-        // destructor or require any clean-up.
-        let llty = type_of::type_of(bcx.ccx(), ty);
-
-        // keep a vector as a register, and running through the field
-        // `insertelement`ing them directly into that register
-        // (i.e. avoid GEPi and `store`s to an alloca) .
-        let mut vec_val = C_undef(llty);
-
-        for &(i, ref e) in fields {
-            let block_datum = trans(bcx, &e);
-            bcx = block_datum.bcx;
-            let position = C_uint(bcx.ccx(), i);
-            let value = block_datum.datum.to_llscalarish(bcx);
-            vec_val = InsertElement(bcx, vec_val, value, position);
-        }
-        Store(bcx, vec_val, addr);
-    } else if let Some(base) = optbase {
-        // Issue 23112: If there is a base, then order-of-eval
-        // requires field expressions eval'ed before base expression.
-
-        // First, trans field expressions to temporary scratch values.
-        let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
-            let datum = unpack_datum!(bcx, trans(bcx, &e));
-            (i, datum)
-        }).collect();
-
-        debug_location.apply(bcx.fcx);
-
-        // Second, trans the base to the dest.
-        assert_eq!(discr, Disr(0));
-
-        let addr = adt::MaybeSizedValue::sized(addr);
-        match expr_kind(bcx.tcx(), &base.expr) {
-            ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => {
-                bcx = trans_into(bcx, &base.expr, SaveIn(addr.value));
-            },
-            ExprKind::RvalueStmt => {
-                bug!("unexpected expr kind for struct base expr")
-            }
-            _ => {
-                let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &base.expr, "base"));
-                for &(i, t) in &base.fields {
-                    let datum = base_datum.get_element(
-                            bcx, t, |srcval| adt::trans_field_ptr(bcx, &repr, srcval, discr, i));
-                    assert!(type_is_sized(bcx.tcx(), datum.ty));
-                    let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
-                    bcx = datum.store_to(bcx, dest);
-                }
-            }
-        }
-
-        // Finally, move scratch field values into actual field locations
-        for (i, datum) in scratch_vals {
-            let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
-            bcx = datum.store_to(bcx, dest);
-        }
-    } else {
-        // No base means we can write all fields directly in place.
- let addr = adt::MaybeSizedValue::sized(addr); - for &(i, ref e) in fields { - let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i); - let e_ty = expr_ty_adjusted(bcx, &e); - bcx = trans_into(bcx, &e, SaveIn(dest)); - let scope = cleanup::CustomScope(custom_cleanup_scope); - fcx.schedule_lifetime_end(scope, dest); - // FIXME: nonzeroing move should generalize to fields - fcx.schedule_drop_mem(scope, dest, e_ty, None); - } - } - - adt::trans_set_discr(bcx, &repr, addr, discr); - - fcx.pop_custom_cleanup_scope(custom_cleanup_scope); - - // If we don't care about the result drop the temporary we made - match dest { - SaveIn(_) => bcx, - Ignore => { - bcx = glue::drop_ty(bcx, addr, ty, debug_location); - base::call_lifetime_end(bcx, addr); - bcx - } - } -} - - -fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - lit: &ast::Lit) - -> DatumBlock<'blk, 'tcx, Expr> { - // must not be a string constant, that is a RvalueDpsExpr - let _icx = push_ctxt("trans_immediate_lit"); - let ty = expr_ty(bcx, expr); - let v = consts::const_lit(bcx.ccx(), expr, lit); - immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock() -} - -fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - op: hir::UnOp, - sub_expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let ccx = bcx.ccx(); - let mut bcx = bcx; - let _icx = push_ctxt("trans_unary_datum"); - - let method_call = MethodCall::expr(expr.id); - - // The only overloaded operator that is translated to a datum - // is an overloaded deref, since it is always yields a `&T`. - // Otherwise, we should be in the RvalueDpsExpr path. - assert!(op == hir::UnDeref || !ccx.tcx().is_method_call(expr.id)); - - let un_ty = expr_ty(bcx, expr); - - let debug_loc = expr.debug_loc(); - - match op { - hir::UnNot => { - let datum = unpack_datum!(bcx, trans(bcx, sub_expr)); - let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc); - immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock() - } - hir::UnNeg => { - let datum = unpack_datum!(bcx, trans(bcx, sub_expr)); - let val = datum.to_llscalarish(bcx); - let (bcx, llneg) = { - if un_ty.is_fp() { - let result = FNeg(bcx, val, debug_loc); - (bcx, result) - } else { - let is_signed = un_ty.is_signed(); - let result = Neg(bcx, val, debug_loc); - let bcx = if bcx.ccx().check_overflow() && is_signed { - let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty); - let is_min = ICmp(bcx, llvm::IntEQ, val, - C_integral(llty, min, true), debug_loc); - with_cond(bcx, is_min, |bcx| { - let msg = InternedString::new( - "attempt to negate with overflow"); - controlflow::trans_fail(bcx, expr_info(expr), msg) - }) - } else { - bcx - }; - (bcx, result) - } - }; - immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock() - } - hir::UnDeref => { - let datum = unpack_datum!(bcx, trans(bcx, sub_expr)); - deref_once(bcx, expr, datum, method_call) - } - } -} - -fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - box_expr: &hir::Expr, - box_ty: Ty<'tcx>, - contents: &hir::Expr, - contents_ty: Ty<'tcx>) - -> DatumBlock<'blk, 'tcx, Expr> { - let _icx = push_ctxt("trans_uniq_expr"); - let fcx = bcx.fcx; - assert!(type_is_sized(bcx.tcx(), contents_ty)); - let llty = type_of::type_of(bcx.ccx(), contents_ty); - let size = llsize_of(bcx.ccx(), llty); - let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty)); - let llty_ptr = llty.ptr_to(); - let Result { bcx, val } = malloc_raw_dyn(bcx, - llty_ptr, - box_ty, - size, - align, - box_expr.debug_loc()); - // Unique boxes 
do not allocate for zero-size types. The standard library - // may assume that `free` is never called on the pointer returned for - // `Box`. - let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 { - trans_into(bcx, contents, SaveIn(val)) - } else { - let custom_cleanup_scope = fcx.push_custom_cleanup_scope(); - fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope), - val, cleanup::HeapExchange, contents_ty); - let bcx = trans_into(bcx, contents, SaveIn(val)); - fcx.pop_custom_cleanup_scope(custom_cleanup_scope); - bcx - }; - immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock() -} - -fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - subexpr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let _icx = push_ctxt("trans_addr_of"); - let mut bcx = bcx; - let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of")); - let ty = expr_ty(bcx, expr); - if !type_is_sized(bcx.tcx(), sub_datum.ty) { - // Always generate an lvalue datum, because this pointer doesn't own - // the data and cleanup is scheduled elsewhere. - DatumBlock::new(bcx, Datum::new(sub_datum.val, ty, LvalueExpr(sub_datum.kind))) - } else { - // Sized value, ref to a thin pointer - immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock() - } -} - -fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - binop_expr: &hir::Expr, - binop_ty: Ty<'tcx>, - op: hir::BinOp, - lhs: Datum<'tcx, Rvalue>, - rhs: Datum<'tcx, Rvalue>) - -> DatumBlock<'blk, 'tcx, Expr> -{ - let _icx = push_ctxt("trans_scalar_binop"); - - let lhs_t = lhs.ty; - assert!(!lhs_t.is_simd()); - let is_float = lhs_t.is_fp(); - let is_signed = lhs_t.is_signed(); - let info = expr_info(binop_expr); - - let binop_debug_loc = binop_expr.debug_loc(); - - let mut bcx = bcx; - let lhs = lhs.to_llscalarish(bcx); - let rhs = rhs.to_llscalarish(bcx); - let val = match op.node { - hir::BiAdd => { - if is_float { - FAdd(bcx, lhs, rhs, binop_debug_loc) - } else { - let (newbcx, res) = with_overflow_check( - bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; - res - } - } - hir::BiSub => { - if is_float { - FSub(bcx, lhs, rhs, binop_debug_loc) - } else { - let (newbcx, res) = with_overflow_check( - bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; - res - } - } - hir::BiMul => { - if is_float { - FMul(bcx, lhs, rhs, binop_debug_loc) - } else { - let (newbcx, res) = with_overflow_check( - bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; - res - } - } - hir::BiDiv => { - if is_float { - FDiv(bcx, lhs, rhs, binop_debug_loc) - } else { - // Only zero-check integers; fp /0 is NaN - bcx = base::fail_if_zero_or_overflows(bcx, - expr_info(binop_expr), - op, - lhs, - rhs, - lhs_t); - if is_signed { - SDiv(bcx, lhs, rhs, binop_debug_loc) - } else { - UDiv(bcx, lhs, rhs, binop_debug_loc) - } - } - } - hir::BiRem => { - if is_float { - FRem(bcx, lhs, rhs, binop_debug_loc) - } else { - // Only zero-check integers; fp %0 is NaN - bcx = base::fail_if_zero_or_overflows(bcx, - expr_info(binop_expr), - op, lhs, rhs, lhs_t); - if is_signed { - SRem(bcx, lhs, rhs, binop_debug_loc) - } else { - URem(bcx, lhs, rhs, binop_debug_loc) - } - } - } - hir::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc), - hir::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc), - hir::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc), - hir::BiShl => { - let (newbcx, res) = with_overflow_check( - bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; - 
res - } - hir::BiShr => { - let (newbcx, res) = with_overflow_check( - bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; - res - } - hir::BiEq | hir::BiNe | hir::BiLt | hir::BiGe | hir::BiLe | hir::BiGt => { - base::compare_scalar_types(bcx, lhs, rhs, lhs_t, op.node, binop_debug_loc) - } - _ => { - span_bug!(binop_expr.span, "unexpected binop"); - } - }; - - immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock() -} - -// refinement types would obviate the need for this -#[derive(Clone, Copy)] -enum lazy_binop_ty { - lazy_and, - lazy_or, -} - - -fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - binop_expr: &hir::Expr, - op: lazy_binop_ty, - a: &hir::Expr, - b: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let _icx = push_ctxt("trans_lazy_binop"); - let binop_ty = expr_ty(bcx, binop_expr); - let fcx = bcx.fcx; - - let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a); - let lhs = lhs.to_llscalarish(past_lhs); - - if past_lhs.unreachable.get() { - return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock(); - } - - // If the rhs can never be reached, don't generate code for it. - if let Some(cond_val) = const_to_opt_uint(lhs) { - match (cond_val, op) { - (0, lazy_and) | - (1, lazy_or) => { - return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock(); - } - _ => { /* continue */ } - } - } - - let join = fcx.new_id_block("join", binop_expr.id); - let before_rhs = fcx.new_id_block("before_rhs", b.id); - - match op { - lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None), - lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None) - } - - let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b); - let rhs = rhs.to_llscalarish(past_rhs); - - if past_rhs.unreachable.get() { - return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock(); - } - - Br(past_rhs, join.llbb, DebugLoc::None); - let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs], - &[past_lhs.llbb, past_rhs.llbb]); - - return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock(); -} - -fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - op: hir::BinOp, - lhs: &hir::Expr, - rhs: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let _icx = push_ctxt("trans_binary"); - let ccx = bcx.ccx(); - - // if overloaded, would be RvalueDpsExpr - assert!(!ccx.tcx().is_method_call(expr.id)); - - match op.node { - hir::BiAnd => { - trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs) - } - hir::BiOr => { - trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs) - } - _ => { - let mut bcx = bcx; - let binop_ty = expr_ty(bcx, expr); - - let lhs = unpack_datum!(bcx, trans(bcx, lhs)); - let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs")); - debug!("trans_binary (expr {}): lhs={:?}", expr.id, lhs); - let rhs = unpack_datum!(bcx, trans(bcx, rhs)); - let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs")); - debug!("trans_binary (expr {}): rhs={:?}", expr.id, rhs); - - if type_is_fat_ptr(ccx.tcx(), lhs.ty) { - assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty), - "built-in binary operators on fat pointers are homogeneous"); - assert_eq!(binop_ty, bcx.tcx().types.bool); - let val = base::compare_scalar_types( - bcx, - lhs.val, - rhs.val, - lhs.ty, - op.node, - expr.debug_loc()); - immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock() - } else { - assert!(!type_is_fat_ptr(ccx.tcx(), rhs.ty), - "built-in binary operators on fat pointers are homogeneous"); - 
trans_scalar_binop(bcx, expr, binop_ty, op, lhs, rhs) - } - } - } -} - -pub fn cast_is_noop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - expr: &hir::Expr, - t_in: Ty<'tcx>, - t_out: Ty<'tcx>) - -> bool { - if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) { - return true; - } - - match (t_in.builtin_deref(true, ty::NoPreference), - t_out.builtin_deref(true, ty::NoPreference)) { - (Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => { - t_in == t_out - } - _ => { - // This condition isn't redundant with the check for CoercionCast: - // different types can be substituted into the same type, and - // == equality can be overconservative if there are regions. - t_in == t_out - } - } -} - -fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - id: ast::NodeId) - -> DatumBlock<'blk, 'tcx, Expr> -{ - use rustc::ty::cast::CastTy::*; - use rustc::ty::cast::IntTy::*; - - fn int_cast(bcx: Block, - lldsttype: Type, - llsrctype: Type, - llsrc: ValueRef, - signed: bool) - -> ValueRef - { - let _icx = push_ctxt("int_cast"); - let srcsz = llsrctype.int_width(); - let dstsz = lldsttype.int_width(); - return if dstsz == srcsz { - BitCast(bcx, llsrc, lldsttype) - } else if srcsz > dstsz { - TruncOrBitCast(bcx, llsrc, lldsttype) - } else if signed { - SExtOrBitCast(bcx, llsrc, lldsttype) - } else { - ZExtOrBitCast(bcx, llsrc, lldsttype) - } - } - - fn float_cast(bcx: Block, - lldsttype: Type, - llsrctype: Type, - llsrc: ValueRef) - -> ValueRef - { - let _icx = push_ctxt("float_cast"); - let srcsz = llsrctype.float_width(); - let dstsz = lldsttype.float_width(); - return if dstsz > srcsz { - FPExt(bcx, llsrc, lldsttype) - } else if srcsz > dstsz { - FPTrunc(bcx, llsrc, lldsttype) - } else { llsrc }; - } - - let _icx = push_ctxt("trans_cast"); - let mut bcx = bcx; - let ccx = bcx.ccx(); - - let t_in = expr_ty_adjusted(bcx, expr); - let t_out = node_id_type(bcx, id); - - debug!("trans_cast({:?} as {:?})", t_in, t_out); - let mut ll_t_in = type_of::immediate_type_of(ccx, t_in); - let ll_t_out = type_of::immediate_type_of(ccx, t_out); - // Convert the value to be cast into a ValueRef, either by-ref or - // by-value as appropriate given its type: - let mut datum = unpack_datum!(bcx, trans(bcx, expr)); - - let datum_ty = monomorphize_type(bcx, datum.ty); - - if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) { - datum.ty = t_out; - return DatumBlock::new(bcx, datum); - } - - if type_is_fat_ptr(bcx.tcx(), t_in) { - assert!(datum.kind.is_by_ref()); - if type_is_fat_ptr(bcx.tcx(), t_out) { - return DatumBlock::new(bcx, Datum::new( - PointerCast(bcx, datum.val, ll_t_out.ptr_to()), - t_out, - Rvalue::new(ByRef) - )).to_expr_datumblock(); - } else { - // Return the address - return immediate_rvalue_bcx(bcx, - PointerCast(bcx, - Load(bcx, get_dataptr(bcx, datum.val)), - ll_t_out), - t_out).to_expr_datumblock(); - } - } - - let r_t_in = CastTy::from_ty(t_in).expect("bad input type for cast"); - let r_t_out = CastTy::from_ty(t_out).expect("bad output type for cast"); - - let (llexpr, signed) = if let Int(CEnum) = r_t_in { - let repr = adt::represent_type(ccx, t_in); - let datum = unpack_datum!( - bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id)); - let llexpr_ptr = datum.to_llref(); - let discr = adt::trans_get_discr(bcx, &repr, llexpr_ptr, - Some(Type::i64(ccx)), true); - ll_t_in = val_ty(discr); - (discr, adt::is_discr_signed(&repr)) - } else { - (datum.to_llscalarish(bcx), t_in.is_signed()) - }; - - let newval = match (r_t_in, r_t_out) { - 
(Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => { - PointerCast(bcx, llexpr, ll_t_out) - } - (Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out), - (Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out), - - (Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed), - (Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr), - (Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out), - (Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out), - (Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out), - (Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out), - - _ => span_bug!(expr.span, - "translating unsupported cast: \ - {:?} -> {:?}", - t_in, - t_out) - }; - return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock(); -} - -fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - op: hir::BinOp, - dst: &hir::Expr, - src: &hir::Expr) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_assign_op"); - let mut bcx = bcx; - - debug!("trans_assign_op(expr={:?})", expr); - - // User-defined operator methods cannot be used with `+=` etc right now - assert!(!bcx.tcx().is_method_call(expr.id)); - - // Evaluate LHS (destination), which should be an lvalue - let dst = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op")); - assert!(!bcx.fcx.type_needs_drop(dst.ty)); - let lhs = load_ty(bcx, dst.val, dst.ty); - let lhs = immediate_rvalue(lhs, dst.ty); - - // Evaluate RHS - FIXME(#28160) this sucks - let rhs = unpack_datum!(bcx, trans(bcx, &src)); - let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "assign_op_rhs")); - - // Perform computation and store the result - let result_datum = unpack_datum!( - bcx, trans_scalar_binop(bcx, expr, dst.ty, op, lhs, rhs)); - return result_datum.store_to(bcx, dst.val); -} - -fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - datum: Datum<'tcx, Expr>, - expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; - - // Ensure cleanup of `datum` if not already scheduled and obtain - // a "by ref" pointer. - let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id)); - - // Compute final type. Note that we are loose with the region and - // mutability, since those things don't matter in trans. - let referent_ty = lv_datum.ty; - let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReErased), referent_ty); - - // Construct the resulting datum. The right datum to return here would be an Lvalue datum, - // because there is cleanup scheduled and the datum doesn't own the data, but for thin pointers - // we microoptimize it to be an Rvalue datum to avoid the extra alloca and level of - // indirection and for thin pointers, this has no ill effects. - let kind = if type_is_sized(bcx.tcx(), referent_ty) { - RvalueExpr(Rvalue::new(ByValue)) - } else { - LvalueExpr(lv_datum.kind) - }; - - // Get the pointer. 
- let llref = lv_datum.to_llref(); - DatumBlock::new(bcx, Datum::new(llref, ptr_ty, kind)) -} - -fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - datum: Datum<'tcx, Expr>, - times: usize) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; - let mut datum = datum; - for i in 0..times { - let method_call = MethodCall::autoderef(expr.id, i as u32); - datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call)); - } - DatumBlock { bcx: bcx, datum: datum } -} - -fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - datum: Datum<'tcx, Expr>, - method_call: MethodCall) - -> DatumBlock<'blk, 'tcx, Expr> { - let ccx = bcx.ccx(); - - debug!("deref_once(expr={:?}, datum={:?}, method_call={:?})", - expr, datum, method_call); - - let mut bcx = bcx; - - // Check for overloaded deref. - let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned(); - let datum = match method { - Some(method) => { - let method_ty = monomorphize_type(bcx, method.ty); - - // Overloaded. Invoke the deref() method, which basically - // converts from the `Smaht` pointer that we have into - // a `&T` pointer. We can then proceed down the normal - // path (below) to dereference that `&T`. - let datum = if method_call.autoderef == 0 { - datum - } else { - // Always perform an AutoPtr when applying an overloaded auto-deref - unpack_datum!(bcx, auto_ref(bcx, datum, expr)) - }; - - let ref_ty = // invoked methods have their LB regions instantiated - ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap(); - let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref"); - - bcx = Callee::method(bcx, method) - .call(bcx, expr.debug_loc(), - ArgOverloadedOp(datum, None), - Some(SaveIn(scratch.val))).bcx; - scratch.to_expr_datum() - } - None => { - // Not overloaded. We already have a pointer we know how to deref. - datum - } - }; - - let r = match datum.ty.sty { - ty::TyBox(content_ty) => { - // Make sure we have an lvalue datum here to get the - // proper cleanups scheduled - let datum = unpack_datum!( - bcx, datum.to_lvalue_datum(bcx, "deref", expr.id)); - - if type_is_sized(bcx.tcx(), content_ty) { - let ptr = load_ty(bcx, datum.val, datum.ty); - DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(datum.kind))) - } else { - // A fat pointer and a DST lvalue have the same representation - // just different types. Since there is no temporary for `*e` - // here (because it is unsized), we cannot emulate the sized - // object code path for running drop glue and free. Instead, - // we schedule cleanup for `e`, turning it into an lvalue. - - let lval = Lvalue::new("expr::deref_once ty_uniq"); - let datum = Datum::new(datum.val, content_ty, LvalueExpr(lval)); - DatumBlock::new(bcx, datum) - } - } - - ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) | - ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => { - let lval = Lvalue::new("expr::deref_once ptr"); - if type_is_sized(bcx.tcx(), content_ty) { - let ptr = datum.to_llscalarish(bcx); - - // Always generate an lvalue datum, even if datum.mode is - // an rvalue. This is because datum.mode is only an - // rvalue for non-owning pointers like &T or *T, in which - // case cleanup *is* scheduled elsewhere, by the true - // owner (or, in the case of *T, by the user). - DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(lval))) - } else { - // A fat pointer and a DST lvalue have the same representation - // just different types. 
- DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr(lval))) - } - } - - _ => { - span_bug!( - expr.span, - "deref invoked on expr of invalid type {:?}", - datum.ty); - } - }; - - debug!("deref_once(expr={}, method_call={:?}, result={:?})", - expr.id, method_call, r.datum); - - return r; -} - -#[derive(Debug)] -enum OverflowOp { - Add, - Sub, - Mul, - Shl, - Shr, -} - -impl OverflowOp { - fn codegen_strategy(&self) -> OverflowCodegen { - use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck}; - match *self { - OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add), - OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub), - OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul), - - OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl), - OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr), - } - } -} - -enum OverflowCodegen { - ViaIntrinsic(OverflowOpViaIntrinsic), - ViaInputCheck(OverflowOpViaInputCheck), -} - -enum OverflowOpViaInputCheck { Shl, Shr, } - -#[derive(Debug)] -enum OverflowOpViaIntrinsic { Add, Sub, Mul, } - -impl OverflowOpViaIntrinsic { - fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef { - let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty); - bcx.ccx().get_intrinsic(&name) - } - fn to_intrinsic_name(&self, tcx: TyCtxt, ty: Ty) -> &'static str { - use syntax::ast::IntTy::*; - use syntax::ast::UintTy::*; - use rustc::ty::{TyInt, TyUint}; - - let new_sty = match ty.sty { - TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] { - "16" => TyInt(I16), - "32" => TyInt(I32), - "64" => TyInt(I64), - _ => bug!("unsupported target word size") - }, - TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] { - "16" => TyUint(U16), - "32" => TyUint(U32), - "64" => TyUint(U64), - _ => bug!("unsupported target word size") - }, - ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(), - _ => bug!("tried to get overflow intrinsic for {:?} applied to non-int type", - *self) - }; - - match *self { - OverflowOpViaIntrinsic::Add => match new_sty { - TyInt(I8) => "llvm.sadd.with.overflow.i8", - TyInt(I16) => "llvm.sadd.with.overflow.i16", - TyInt(I32) => "llvm.sadd.with.overflow.i32", - TyInt(I64) => "llvm.sadd.with.overflow.i64", - - TyUint(U8) => "llvm.uadd.with.overflow.i8", - TyUint(U16) => "llvm.uadd.with.overflow.i16", - TyUint(U32) => "llvm.uadd.with.overflow.i32", - TyUint(U64) => "llvm.uadd.with.overflow.i64", - - _ => bug!(), - }, - OverflowOpViaIntrinsic::Sub => match new_sty { - TyInt(I8) => "llvm.ssub.with.overflow.i8", - TyInt(I16) => "llvm.ssub.with.overflow.i16", - TyInt(I32) => "llvm.ssub.with.overflow.i32", - TyInt(I64) => "llvm.ssub.with.overflow.i64", - - TyUint(U8) => "llvm.usub.with.overflow.i8", - TyUint(U16) => "llvm.usub.with.overflow.i16", - TyUint(U32) => "llvm.usub.with.overflow.i32", - TyUint(U64) => "llvm.usub.with.overflow.i64", - - _ => bug!(), - }, - OverflowOpViaIntrinsic::Mul => match new_sty { - TyInt(I8) => "llvm.smul.with.overflow.i8", - TyInt(I16) => "llvm.smul.with.overflow.i16", - TyInt(I32) => "llvm.smul.with.overflow.i32", - TyInt(I64) => "llvm.smul.with.overflow.i64", - - TyUint(U8) => "llvm.umul.with.overflow.i8", - TyUint(U16) => "llvm.umul.with.overflow.i16", - TyUint(U32) => "llvm.umul.with.overflow.i32", - TyUint(U64) => "llvm.umul.with.overflow.i64", - - _ => bug!(), - }, - } - } - - fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, - info: NodeIdAndSpan, - lhs_t: Ty<'tcx>, lhs: ValueRef, - rhs: ValueRef, - 
binop_debug_loc: DebugLoc) - -> (Block<'blk, 'tcx>, ValueRef) { - use rustc_const_math::{ConstMathErr, Op}; - - let llfn = self.to_intrinsic(bcx, lhs_t); - - let val = Call(bcx, llfn, &[lhs, rhs], binop_debug_loc); - let result = ExtractValue(bcx, val, 0); // iN operation result - let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?" - - let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false), - binop_debug_loc); - - let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1"); - let expected = Call(bcx, expect, &[cond, C_bool(bcx.ccx(), false)], - binop_debug_loc); - - let op = match *self { - OverflowOpViaIntrinsic::Add => Op::Add, - OverflowOpViaIntrinsic::Sub => Op::Sub, - OverflowOpViaIntrinsic::Mul => Op::Mul - }; - - let bcx = - base::with_cond(bcx, expected, |bcx| - controlflow::trans_fail(bcx, info, - InternedString::new(ConstMathErr::Overflow(op).description()))); - - (bcx, result) - } -} - -impl OverflowOpViaInputCheck { - fn build_with_input_check<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - info: NodeIdAndSpan, - lhs_t: Ty<'tcx>, - lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) - -> (Block<'blk, 'tcx>, ValueRef) - { - use rustc_const_math::{ConstMathErr, Op}; - - let lhs_llty = val_ty(lhs); - let rhs_llty = val_ty(rhs); - - // Panic if any bits are set outside of bits that we always - // mask in. - // - // Note that the mask's value is derived from the LHS type - // (since that is where the 32/64 distinction is relevant) but - // the mask's type must match the RHS type (since they will - // both be fed into an and-binop) - let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true); - - let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc); - let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc); - let (result, op) = match *self { - OverflowOpViaInputCheck::Shl => - (build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc), Op::Shl), - OverflowOpViaInputCheck::Shr => - (build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc), Op::Shr) - }; - let bcx = - base::with_cond(bcx, cond, |bcx| - controlflow::trans_fail(bcx, info, - InternedString::new(ConstMathErr::Overflow(op).description()))); - - (bcx, result) - } -} - -// Check if an integer or vector contains a nonzero element. -fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - value: ValueRef, - binop_debug_loc: DebugLoc) -> ValueRef { - let llty = val_ty(value); - let kind = llty.kind(); - match kind { - TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc), - TypeKind::Vector => { - // Check if any elements of the vector are nonzero by treating - // it as a wide integer and checking if the integer is nonzero. 
- let width = llty.vector_length() as u64 * llty.element_type().int_width(); - let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width)); - build_nonzero_check(bcx, int_value, binop_debug_loc) - }, - _ => bug!("build_nonzero_check: expected Integer or Vector, found {:?}", kind), - } -} - -fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan, - lhs_t: Ty<'tcx>, lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) - -> (Block<'blk, 'tcx>, ValueRef) { - if bcx.unreachable.get() { return (bcx, _Undef(lhs)); } - if bcx.ccx().check_overflow() { - - match oop.codegen_strategy() { - OverflowCodegen::ViaIntrinsic(oop) => - oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc), - OverflowCodegen::ViaInputCheck(oop) => - oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc), - } - } else { - let res = match oop { - OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc), - OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc), - OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc), - - OverflowOp::Shl => - build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc), - OverflowOp::Shr => - build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc), - }; - (bcx, res) - } -} - -/// We categorize expressions into three kinds. The distinction between -/// lvalue/rvalue is fundamental to the language. The distinction between the -/// two kinds of rvalues is an artifact of trans which reflects how we will -/// generate code for that kind of expression. See trans/expr.rs for more -/// information. -#[derive(Copy, Clone)] -enum ExprKind { - Lvalue, - RvalueDps, - RvalueDatum, - RvalueStmt -} - -fn expr_kind<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, expr: &hir::Expr) -> ExprKind { - if tcx.is_method_call(expr.id) { - // Overloaded operations are generally calls, and hence they are - // generated via DPS, but there are a few exceptions: - return match expr.node { - // `a += b` has a unit result. - hir::ExprAssignOp(..) => ExprKind::RvalueStmt, - - // the deref method invoked for `*a` always yields an `&T` - hir::ExprUnary(hir::UnDeref, _) => ExprKind::Lvalue, - - // the index method invoked for `a[i]` always yields an `&T` - hir::ExprIndex(..) => ExprKind::Lvalue, - - // in the general case, result could be any type, use DPS - _ => ExprKind::RvalueDps - }; - } - - match expr.node { - hir::ExprPath(..) => { - match tcx.expect_def(expr.id) { - // Put functions and ctors with the ADTs, as they - // are zero-sized, so DPS is the cheapest option. - Def::Struct(..) | Def::Variant(..) | - Def::Fn(..) | Def::Method(..) => { - ExprKind::RvalueDps - } - - // Note: there is actually a good case to be made that - // DefArg's, particularly those of immediate type, ought to - // considered rvalues. - Def::Static(..) | - Def::Upvar(..) | - Def::Local(..) => ExprKind::Lvalue, - - Def::Const(..) | - Def::AssociatedConst(..) => ExprKind::RvalueDatum, - - def => { - span_bug!( - expr.span, - "uncategorized def for expr {}: {:?}", - expr.id, - def); - } - } - } - - hir::ExprType(ref expr, _) => { - expr_kind(tcx, expr) - } - - hir::ExprUnary(hir::UnDeref, _) | - hir::ExprField(..) | - hir::ExprTupField(..) | - hir::ExprIndex(..) => { - ExprKind::Lvalue - } - - hir::ExprCall(..) | - hir::ExprMethodCall(..) | - hir::ExprStruct(..) | - hir::ExprTup(..) | - hir::ExprIf(..) | - hir::ExprMatch(..) | - hir::ExprClosure(..) | - hir::ExprBlock(..) | - hir::ExprRepeat(..) | - hir::ExprVec(..) 
=> { - ExprKind::RvalueDps - } - - hir::ExprLit(ref lit) if lit.node.is_str() => { - ExprKind::RvalueDps - } - - hir::ExprBreak(..) | - hir::ExprAgain(..) | - hir::ExprRet(..) | - hir::ExprWhile(..) | - hir::ExprLoop(..) | - hir::ExprAssign(..) | - hir::ExprInlineAsm(..) | - hir::ExprAssignOp(..) => { - ExprKind::RvalueStmt - } - - hir::ExprLit(_) | // Note: LitStr is carved out above - hir::ExprUnary(..) | - hir::ExprBox(_) | - hir::ExprAddrOf(..) | - hir::ExprBinary(..) | - hir::ExprCast(..) => { - ExprKind::RvalueDatum - } - } -} diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 93e5f4ba1e..fe76ec05f6 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -19,23 +19,21 @@ use llvm::{ValueRef, get_param}; use middle::lang_items::ExchangeFreeFnLangItem; use rustc::ty::subst::{Substs}; use rustc::traits; -use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable}; use adt; -use adt::GetDtorType; // for tcx.dtor_type() use base::*; use build::*; -use callee::{Callee, ArgVals}; -use cleanup; -use cleanup::CleanupMethods; +use callee::{Callee}; use common::*; use debuginfo::DebugLoc; -use expr; use machine::*; use monomorphize; use trans_item::TransItem; +use tvec; use type_of::{type_of, sizing_type_of, align_of}; use type_::Type; use value::Value; +use Disr; use arena::TypedArena; use syntax_pos::DUMMY_SP; @@ -50,8 +48,8 @@ pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem); let args = [PointerCast(bcx, v, Type::i8p(bcx.ccx())), size, align]; - Callee::def(bcx.ccx(), def_id, bcx.tcx().mk_substs(Substs::empty())) - .call(bcx, debug_loc, ArgVals(&args), None).bcx + Callee::def(bcx.ccx(), def_id, Substs::empty(bcx.tcx())) + .call(bcx, debug_loc, &args, None).bcx } pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, @@ -94,6 +92,8 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> { assert!(t.is_normalized_for_trans()); + let t = tcx.erase_regions(&t); + // Even if there is no dtor for t, there might be one deeper down and we // might need to pass in the vtable ptr. if !type_is_sized(tcx, t) { @@ -115,7 +115,7 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, match t.sty { ty::TyBox(typ) if !type_needs_drop(tcx, typ) && type_is_sized(tcx, typ) => { - tcx.normalizing_infer_ctxt(traits::Reveal::All).enter(|infcx| { + tcx.infer_ctxt(None, None, traits::Reveal::All).enter(|infcx| { let layout = t.layout(&infcx).unwrap(); if layout.size(&tcx.data_layout).bytes() == 0 { // `Box` does not allocate. @@ -133,20 +133,18 @@ pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>, debug_loc: DebugLoc) -> Block<'blk, 'tcx> { - drop_ty_core(bcx, v, t, debug_loc, false, None) + drop_ty_core(bcx, v, t, debug_loc, false) } pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>, debug_loc: DebugLoc, - skip_dtor: bool, - drop_hint: Option) + skip_dtor: bool) -> Block<'blk, 'tcx> { // NB: v is an *alias* of type t here, not a direct value. 
- debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint); + debug!("drop_ty_core(t={:?}, skip_dtor={})", t, skip_dtor); let _icx = push_ctxt("drop_ty"); - let mut bcx = bcx; if bcx.fcx.type_needs_drop(t) { let ccx = bcx.ccx(); let g = if skip_dtor { @@ -162,23 +160,8 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v }; - match drop_hint { - Some(drop_hint) => { - let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8); - let moved_val = - C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false); - let may_need_drop = - ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None); - bcx = with_cond(bcx, may_need_drop, |cx| { - Call(cx, glue, &[ptr], debug_loc); - cx - }) - } - None => { - // No drop-hint ==> call standard drop glue - Call(bcx, glue, &[ptr], debug_loc); - } - } + // No drop-hint ==> call standard drop glue + Call(bcx, glue, &[ptr], debug_loc); } bcx } @@ -193,7 +176,7 @@ pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let vp = alloc_ty(bcx, t, ""); call_lifetime_start(bcx, vp); store_ty(bcx, v, vp, t); - let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None); + let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor); call_lifetime_end(bcx, vp); bcx } @@ -233,34 +216,14 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) -> ValueRef { let g = g.map_ty(|t| get_drop_glue_type(ccx.tcx(), t)); match ccx.drop_glues().borrow().get(&g) { - Some(&(glue, _)) => return glue, + Some(&(glue, _)) => glue, None => { - debug!("Could not find drop glue for {:?} -- {} -- {}. \ - Falling back to on-demand instantiation.", + bug!("Could not find drop glue for {:?} -- {} -- {}.", g, TransItem::DropGlue(g).to_raw_string(), ccx.codegen_unit().name()); - - ccx.stats().n_fallback_instantiations.set(ccx.stats() - .n_fallback_instantiations - .get() + 1); } } - - // FIXME: #34151 - // Normally, getting here would indicate a bug in trans::collector, - // since it seems to have missed a translation item. When we are - // translating with non-MIR-based trans, however, the results of the - // collector are not entirely reliable since it bases its analysis - // on MIR. Thus, we'll instantiate the missing function on demand in - // this codegen unit, so that things keep working. 
- - TransItem::DropGlue(g).predefine(ccx, llvm::InternalLinkage); - TransItem::DropGlue(g).define(ccx); - - // Now that we made sure that the glue function is in ccx.drop_glues, - // give it another try - get_drop_glue_core(ccx, g) } pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, @@ -273,7 +236,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arena = TypedArena::new(); fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &arena); - let bcx = fcx.init(false, None); + let bcx = fcx.init(false); ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1); // All glue functions take values passed *by alias*; this is a @@ -288,46 +251,13 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fcx.finish(bcx, DebugLoc::None); } - -fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - t: Ty<'tcx>, - struct_data: ValueRef) - -> Block<'blk, 'tcx> { - assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized"); - - let repr = adt::represent_type(bcx.ccx(), t); - let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &repr, struct_data)); - let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type()); - let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type()); - let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false); - - let bcx = if !bcx.ccx().check_drop_flag_for_sanity() { - bcx - } else { - let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type()); - let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false); - let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None); - let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None); - let drop_flag_neither_initialized_nor_cleared = - And(bcx, not_init, not_done, DebugLoc::None); - with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| { - let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap")); - Call(cx, llfn, &[], DebugLoc::None); - cx - }) - }; - - let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None); - with_cond(bcx, drop_flag_dtor_needed, |cx| { - trans_struct_drop(cx, t, struct_data) - }) -} -fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, - v0: ValueRef) + v0: ValueRef, + shallow_drop: bool) -> Block<'blk, 'tcx> { - debug!("trans_struct_drop t: {}", t); + debug!("trans_custom_dtor t: {}", t); let tcx = bcx.tcx(); let mut bcx = bcx; @@ -343,20 +273,26 @@ fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Issue #23611: schedule cleanup of contents, re-inspecting the // discriminant (if any) in case of variant swap in drop code. 
- bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t); + if !shallow_drop { + bcx.fcx.schedule_drop_adt_contents(contents_scope, v0, t); + } let (sized_args, unsized_args); let args: &[ValueRef] = if type_is_sized(tcx, t) { sized_args = [v0]; &sized_args } else { - unsized_args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_meta(bcx, v0))]; + // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments + unsized_args = [ + Load(bcx, get_dataptr(bcx, v0)), + Load(bcx, get_meta(bcx, v0)) + ]; &unsized_args }; let trait_ref = ty::Binder(ty::TraitRef { def_id: tcx.lang_items.drop_trait().unwrap(), - substs: tcx.mk_substs(Substs::empty().with_self_ty(t)) + substs: Substs::new_trait(tcx, t, &[]) }); let vtbl = match fulfill_obligation(bcx.ccx().shared(), DUMMY_SP, trait_ref) { traits::VtableImpl(data) => data, @@ -364,7 +300,7 @@ fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, }; let dtor_did = def.destructor().unwrap(); bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs) - .call(bcx, DebugLoc::None, ArgVals(args), None).bcx; + .call(bcx, DebugLoc::None, args, None).bcx; bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope) } @@ -389,16 +325,36 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, return (C_undef(llty), C_undef(llty)); } match t.sty { - ty::TyStruct(def, substs) => { + ty::TyAdt(def, substs) => { let ccx = bcx.ccx(); // First get the size of all statically known fields. - // Don't use type_of::sizing_type_of because that expects t to be sized. + // Don't use type_of::sizing_type_of because that expects t to be sized, + // and it also rounds up to alignment, which we want to avoid, + // as the unsized field's alignment could be smaller. assert!(!t.is_simd()); - let repr = adt::represent_type(ccx, t); - let sizing_type = adt::sizing_type_context_of(ccx, &repr, true); - debug!("DST {} sizing_type: {:?}", t, sizing_type); - let sized_size = llsize_of_alloc(ccx, sizing_type.prefix()); - let sized_align = llalign_of_min(ccx, sizing_type.prefix()); + let layout = ccx.layout_of(t); + debug!("DST {} layout: {:?}", t, layout); + + // Returns size in bytes of all fields except the last one + // (we will be recursing on the last one). + fn local_prefix_bytes(variant: &ty::layout::Struct) -> u64 { + let fields = variant.offset_after_field.len(); + if fields > 1 { + variant.offset_after_field[fields - 2].bytes() + } else { + 0 + } + } + + let (sized_size, sized_align) = match *layout { + ty::layout::Layout::Univariant { ref variant, .. } => { + (local_prefix_bytes(variant), variant.align.abi()) + } + _ => { + bug!("size_and_align_of_dst: expcted Univariant for `{}`, found {:#?}", + t, layout); + } + }; debug!("DST {} statically sized prefix size: {} align: {}", t, sized_size, sized_align); let sized_size = C_uint(ccx, sized_size); @@ -418,15 +374,7 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, // here. But this is where the add would go.) // Return the sum of sizes and max of aligns. - let mut size = bcx.add(sized_size, unsized_size); - - // Issue #27023: If there is a drop flag, *now* we add 1 - // to the size. (We can do this without adding any - // padding because drop flags do not have any alignment - // constraints.) 
- if sizing_type.needs_drop_flag() { - size = bcx.add(size, C_uint(bcx.ccx(), 1_u64)); - } + let size = bcx.add(sized_size, unsized_size); // Choose max of two known alignments (combined value must // be aligned according to more restrictive of the two). @@ -479,7 +427,9 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, } } -fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueKind<'tcx>) +fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + v0: ValueRef, + g: DropGlueKind<'tcx>) -> Block<'blk, 'tcx> { let t = g.ty(); @@ -492,9 +442,6 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK // must definitely check for special bit-patterns corresponding to // the special dtor markings. - let inttype = Type::int(bcx.ccx()); - let dropped_pattern = C_integral(inttype, adt::DTOR_DONE_U64, false); - match t.sty { ty::TyBox(content_ty) => { // Support for TyBox is built-in and its drop glue is @@ -502,74 +449,39 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK // a safe-guard, assert TyBox not used with TyContents. assert!(!skip_dtor); if !type_is_sized(bcx.tcx(), content_ty) { - let llval = expr::get_dataptr(bcx, v0); + let llval = get_dataptr(bcx, v0); let llbox = Load(bcx, llval); - let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx())); - let drop_flag_not_dropped_already = - ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None); - with_cond(bcx, drop_flag_not_dropped_already, |bcx| { - let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None); - let info = expr::get_meta(bcx, v0); - let info = Load(bcx, info); - let (llsize, llalign) = - size_and_align_of_dst(&bcx.build(), content_ty, info); - - // `Box` does not allocate. - let needs_free = ICmp(bcx, - llvm::IntNE, - llsize, - C_uint(bcx.ccx(), 0u64), - DebugLoc::None); - with_cond(bcx, needs_free, |bcx| { - trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None) - }) + let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None); + // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments + let info = get_meta(bcx, v0); + let info = Load(bcx, info); + let (llsize, llalign) = + size_and_align_of_dst(&bcx.build(), content_ty, info); + + // `Box` does not allocate. + let needs_free = ICmp(bcx, + llvm::IntNE, + llsize, + C_uint(bcx.ccx(), 0u64), + DebugLoc::None); + with_cond(bcx, needs_free, |bcx| { + trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None) }) } else { let llval = v0; let llbox = Load(bcx, llval); - let llbox_as_usize = PtrToInt(bcx, llbox, inttype); - let drop_flag_not_dropped_already = - ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None); - with_cond(bcx, drop_flag_not_dropped_already, |bcx| { - let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None); - trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None) - }) - } - } - ty::TyStruct(def, _) | ty::TyEnum(def, _) => { - match (def.dtor_kind(), skip_dtor) { - (ty::TraitDtor(true), false) => { - // FIXME(16758) Since the struct is unsized, it is hard to - // find the drop flag (which is at the end of the struct). - // Lets just ignore the flag and pretend everything will be - // OK. - if type_is_sized(bcx.tcx(), t) { - trans_struct_drop_flag(bcx, t, v0) - } else { - // Give the user a heads up that we are doing something - // stupid and dangerous. - bcx.sess().warn(&format!("Ignoring drop flag in destructor for {} \ - because the struct is unsized. 
See issue \ - #16758", t)); - trans_struct_drop(bcx, t, v0) - } - } - (ty::TraitDtor(false), false) => { - trans_struct_drop(bcx, t, v0) - } - (ty::NoDtor, _) | (_, true) => { - // No dtor? Just the default case - iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None)) - } + let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None); + trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None) } } ty::TyTrait(..) => { // No support in vtable for distinguishing destroying with // versus without calling Drop::drop. Assert caller is // okay with always calling the Drop impl, if any. + // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments assert!(!skip_dtor); - let data_ptr = expr::get_dataptr(bcx, v0); - let vtable_ptr = Load(bcx, expr::get_meta(bcx, v0)); + let data_ptr = get_dataptr(bcx, v0); + let vtable_ptr = Load(bcx, get_meta(bcx, v0)); let dtor = Load(bcx, vtable_ptr); Call(bcx, dtor, @@ -577,15 +489,164 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK DebugLoc::None); bcx } + ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => { + trans_custom_dtor(bcx, t, v0, def.is_union()) + } + ty::TyAdt(def, ..) if def.is_union() => { + bcx + } _ => { if bcx.fcx.type_needs_drop(t) { - iter_structural_ty(bcx, - v0, - t, - |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None)) + drop_structural_ty(bcx, v0, t) } else { bcx } } } } + +// Iterates through the elements of a structural type, dropping them. +fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, + av: ValueRef, + t: Ty<'tcx>) + -> Block<'blk, 'tcx> { + let _icx = push_ctxt("drop_structural_ty"); + + fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, + t: Ty<'tcx>, + av: adt::MaybeSizedValue, + variant: ty::VariantDef<'tcx>, + substs: &Substs<'tcx>) + -> Block<'blk, 'tcx> { + let _icx = push_ctxt("iter_variant"); + let tcx = cx.tcx(); + let mut cx = cx; + + for (i, field) in variant.fields.iter().enumerate() { + let arg = monomorphize::field_ty(tcx, substs, field); + cx = drop_ty(cx, + adt::trans_field_ptr(cx, t, av, Disr::from(variant.disr_val), i), + arg, DebugLoc::None); + } + return cx; + } + + let value = if type_is_sized(cx.tcx(), t) { + adt::MaybeSizedValue::sized(av) + } else { + // FIXME(#36457) -- we should pass unsized values as two arguments + let data = Load(cx, get_dataptr(cx, av)); + let info = Load(cx, get_meta(cx, av)); + adt::MaybeSizedValue::unsized_(data, info) + }; + + let mut cx = cx; + match t.sty { + ty::TyClosure(_, ref substs) => { + for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() { + let llupvar = adt::trans_field_ptr(cx, t, value, Disr(0), i); + cx = drop_ty(cx, llupvar, upvar_ty, DebugLoc::None); + } + } + ty::TyArray(_, n) => { + let base = get_dataptr(cx, value.value); + let len = C_uint(cx.ccx(), n); + let unit_ty = t.sequence_element_type(cx.tcx()); + cx = tvec::slice_for_each(cx, base, unit_ty, len, + |bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None)); + } + ty::TySlice(_) | ty::TyStr => { + let unit_ty = t.sequence_element_type(cx.tcx()); + cx = tvec::slice_for_each(cx, value.value, unit_ty, value.meta, + |bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None)); + } + ty::TyTuple(ref args) => { + for (i, arg) in args.iter().enumerate() { + let llfld_a = adt::trans_field_ptr(cx, t, value, Disr(0), i); + cx = drop_ty(cx, llfld_a, *arg, DebugLoc::None); + } + } + ty::TyAdt(adt, substs) => match adt.adt_kind() { + AdtKind::Struct => { + let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, 
None); + for (i, &Field(_, field_ty)) in fields.iter().enumerate() { + let llfld_a = adt::trans_field_ptr(cx, t, value, Disr::from(discr), i); + + let val = if type_is_sized(cx.tcx(), field_ty) { + llfld_a + } else { + // FIXME(#36457) -- we should pass unsized values as two arguments + let scratch = alloc_ty(cx, field_ty, "__fat_ptr_iter"); + Store(cx, llfld_a, get_dataptr(cx, scratch)); + Store(cx, value.meta, get_meta(cx, scratch)); + scratch + }; + cx = drop_ty(cx, val, field_ty, DebugLoc::None); + } + } + AdtKind::Union => { + bug!("Union in `glue::drop_structural_ty`"); + } + AdtKind::Enum => { + let fcx = cx.fcx; + let ccx = fcx.ccx; + let n_variants = adt.variants.len(); + + // NB: we must hit the discriminant first so that structural + // comparison know not to proceed when the discriminants differ. + + match adt::trans_switch(cx, t, av, false) { + (adt::BranchKind::Single, None) => { + if n_variants != 0 { + assert!(n_variants == 1); + cx = iter_variant(cx, t, adt::MaybeSizedValue::sized(av), + &adt.variants[0], substs); + } + } + (adt::BranchKind::Switch, Some(lldiscrim_a)) => { + cx = drop_ty(cx, lldiscrim_a, cx.tcx().types.isize, DebugLoc::None); + + // Create a fall-through basic block for the "else" case of + // the switch instruction we're about to generate. Note that + // we do **not** use an Unreachable instruction here, even + // though most of the time this basic block will never be hit. + // + // When an enum is dropped it's contents are currently + // overwritten to DTOR_DONE, which means the discriminant + // could have changed value to something not within the actual + // range of the discriminant. Currently this function is only + // used for drop glue so in this case we just return quickly + // from the outer function, and any other use case will only + // call this for an already-valid enum in which case the `ret + // void` will never be hit. + let ret_void_cx = fcx.new_block("enum-iter-ret-void"); + RetVoid(ret_void_cx, DebugLoc::None); + let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants); + let next_cx = fcx.new_block("enum-iter-next"); + + for variant in &adt.variants { + let variant_cx = fcx.new_block(&format!("enum-iter-variant-{}", + &variant.disr_val + .to_string())); + let case_val = adt::trans_case(cx, t, Disr::from(variant.disr_val)); + AddCase(llswitch, case_val, variant_cx.llbb); + let variant_cx = iter_variant(variant_cx, + t, + value, + variant, + substs); + Br(variant_cx, next_cx.llbb, DebugLoc::None); + } + cx = next_cx; + } + _ => ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"), + } + } + }, + + _ => { + cx.sess().unimpl(&format!("type in drop_structural_ty: {}", t)) + } + } + return cx; +} diff --git a/src/librustc_trans/inline.rs b/src/librustc_trans/inline.rs deleted file mode 100644 index 8581fccf10..0000000000 --- a/src/librustc_trans/inline.rs +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use rustc::hir::def_id::DefId; -use base::push_ctxt; -use common::*; -use monomorphize::Instance; - -use rustc::dep_graph::DepNode; - -fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option { - debug!("instantiate_inline({:?})", fn_id); - let _icx = push_ctxt("instantiate_inline"); - let tcx = ccx.tcx(); - let _task = tcx.dep_graph.in_task(DepNode::TransInlinedItem(fn_id)); - - tcx.sess - .cstore - .maybe_get_item_ast(tcx, fn_id) - .map(|(_, inline_id)| { - tcx.map.local_def_id(inline_id) - }) -} - -pub fn get_local_instance(ccx: &CrateContext, fn_id: DefId) - -> Option { - if let Some(_) = ccx.tcx().map.as_local_node_id(fn_id) { - Some(fn_id) - } else { - instantiate_inline(ccx, fn_id) - } -} - -pub fn maybe_instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> DefId { - get_local_instance(ccx, fn_id).unwrap_or(fn_id) -} - -pub fn maybe_inline_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - instance: Instance<'tcx>) -> Instance<'tcx> { - let def_id = maybe_instantiate_inline(ccx, instance.def); - Instance { - def: def_id, - substs: instance.substs - } -} diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 7be173d17b..b1b09d3ca2 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -14,22 +14,14 @@ use arena::TypedArena; use intrinsics::{self, Intrinsic}; use libc; use llvm; -use llvm::{ValueRef, TypeKind}; -use rustc::ty::subst; -use rustc::ty::subst::FnSpace; +use llvm::{ValueRef}; use abi::{Abi, FnType}; use adt; use base::*; use build::*; -use callee::{self, Callee}; -use cleanup; -use cleanup::CleanupMethods; use common::*; -use consts; -use datum::*; use debuginfo::DebugLoc; use declare; -use expr; use glue; use type_of; use machine; @@ -38,11 +30,9 @@ use rustc::ty::{self, Ty}; use Disr; use rustc::hir; use syntax::ast; -use syntax::ptr::P; use syntax::parse::token; use rustc::session::Session; -use rustc_const_eval::fatal_const_eval_err; use syntax_pos::{Span, DUMMY_SP}; use std::cmp::Ordering; @@ -99,8 +89,8 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option { pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, callee_ty: Ty<'tcx>, fn_ty: &FnType, - args: callee::CallArgs<'a, 'tcx>, - dest: expr::Dest, + llargs: &[ValueRef], + llresult: ValueRef, call_debug_location: DebugLoc) -> Result<'blk, 'tcx> { let fcx = bcx.fcx; @@ -109,234 +99,50 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let _icx = push_ctxt("trans_intrinsic_call"); - let (def_id, substs, sig) = match callee_ty.sty { - ty::TyFnDef(def_id, substs, fty) => { - let sig = tcx.erase_late_bound_regions(&fty.sig); - (def_id, substs, tcx.normalize_associated_type(&sig)) - } + let (def_id, substs, fty) = match callee_ty.sty { + ty::TyFnDef(def_id, substs, ref fty) => (def_id, substs, fty), _ => bug!("expected fn item type, found {}", callee_ty) }; + + let sig = tcx.erase_late_bound_regions_and_normalize(&fty.sig); let arg_tys = sig.inputs; let ret_ty = sig.output; let name = tcx.item_name(def_id).as_str(); let span = match call_debug_location { - DebugLoc::At(_, span) | DebugLoc::ScopeAt(_, span) => span, + DebugLoc::ScopeAt(_, span) => span, DebugLoc::None => { span_bug!(fcx.span.unwrap_or(DUMMY_SP), "intrinsic `{}` called with missing span", name); } }; - let cleanup_scope = fcx.push_custom_cleanup_scope(); - - // For `transmute` we can just trans the input expr directly into dest - if name == "transmute" { - let llret_ty = type_of::type_of(ccx, ret_ty); - match args { - 
callee::ArgExprs(arg_exprs) => { - assert_eq!(arg_exprs.len(), 1); - - let (in_type, out_type) = (*substs.types.get(FnSpace, 0), - *substs.types.get(FnSpace, 1)); - let llintype = type_of::type_of(ccx, in_type); - let llouttype = type_of::type_of(ccx, out_type); - - let in_type_size = machine::llbitsize_of_real(ccx, llintype); - let out_type_size = machine::llbitsize_of_real(ccx, llouttype); - - if let ty::TyFnDef(def_id, substs, _) = in_type.sty { - if out_type_size != 0 { - // FIXME #19925 Remove this hack after a release cycle. - let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0])); - let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val; - let llfnty = val_ty(llfn); - let llresult = match dest { - expr::SaveIn(d) => d, - expr::Ignore => alloc_ty(bcx, out_type, "ret") - }; - Store(bcx, llfn, PointerCast(bcx, llresult, llfnty.ptr_to())); - if dest == expr::Ignore { - bcx = glue::drop_ty(bcx, llresult, out_type, - call_debug_location); - } - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - return Result::new(bcx, llresult); - } - } - - // This should be caught by the intrinsicck pass - assert_eq!(in_type_size, out_type_size); - - let nonpointer_nonaggregate = |llkind: TypeKind| -> bool { - use llvm::TypeKind::*; - match llkind { - Half | Float | Double | X86_FP80 | FP128 | - PPC_FP128 | Integer | Vector | X86_MMX => true, - _ => false - } - }; - - // An approximation to which types can be directly cast via - // LLVM's bitcast. This doesn't cover pointer -> pointer casts, - // but does, importantly, cover SIMD types. - let in_kind = llintype.kind(); - let ret_kind = llret_ty.kind(); - let bitcast_compatible = - (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || { - in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer - }; - - let dest = if bitcast_compatible { - // if we're here, the type is scalar-like (a primitive, a - // SIMD type or a pointer), and so can be handled as a - // by-value ValueRef and can also be directly bitcast to the - // target type. Doing this special case makes conversions - // like `u32x4` -> `u64x2` much nicer for LLVM and so more - // efficient (these are done efficiently implicitly in C - // with the `__m128i` type and so this means Rust doesn't - // lose out there). - let expr = &arg_exprs[0]; - let datum = unpack_datum!(bcx, expr::trans(bcx, expr)); - let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp")); - let val = if datum.kind.is_by_ref() { - load_ty(bcx, datum.val, datum.ty) - } else { - from_immediate(bcx, datum.val) - }; - - let cast_val = BitCast(bcx, val, llret_ty); - - match dest { - expr::SaveIn(d) => { - // this often occurs in a sequence like `Store(val, - // d); val2 = Load(d)`, so disappears easily. - Store(bcx, cast_val, d); - } - expr::Ignore => {} - } - dest - } else { - // The types are too complicated to do with a by-value - // bitcast, so pointer cast instead. We need to cast the - // dest so the types work out. 
- let dest = match dest { - expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())), - expr::Ignore => expr::Ignore - }; - bcx = expr::trans_into(bcx, &arg_exprs[0], dest); - dest - }; - - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - - return match dest { - expr::SaveIn(d) => Result::new(bcx, d), - expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to())) - }; - - } - - _ => { - bug!("expected expr as argument for transmute"); - } - } - } - - // For `move_val_init` we can evaluate the destination address - // (the first argument) and then trans the source value (the - // second argument) directly into the resulting destination - // address. - if name == "move_val_init" { - if let callee::ArgExprs(ref exprs) = args { - let (dest_expr, source_expr) = if exprs.len() != 2 { - bug!("expected two exprs as arguments for `move_val_init` intrinsic"); - } else { - (&exprs[0], &exprs[1]) - }; - - // evaluate destination address - let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr)); - let dest_datum = unpack_datum!( - bcx, dest_datum.to_rvalue_datum(bcx, "arg")); - let dest_datum = unpack_datum!( - bcx, dest_datum.to_appropriate_datum(bcx)); - - // `expr::trans_into(bcx, expr, dest)` is equiv to - // - // `trans(bcx, expr).store_to_dest(dest)`, - // - // which for `dest == expr::SaveIn(addr)`, is equivalent to: - // - // `trans(bcx, expr).store_to(bcx, addr)`. - let lldest = expr::Dest::SaveIn(dest_datum.val); - bcx = expr::trans_into(bcx, source_expr, lldest); - - let llresult = C_nil(ccx); - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - - return Result::new(bcx, llresult); - } else { - bug!("expected two exprs as arguments for `move_val_init` intrinsic"); - } - } - - // save the actual AST arguments for later (some places need to do - // const-evaluation on them) - let expr_arguments = match args { - callee::ArgExprs(args) => Some(args), - _ => None, - }; - - // Push the arguments. - let mut llargs = Vec::new(); - bcx = callee::trans_args(bcx, - Abi::RustIntrinsic, - fn_ty, - &mut callee::Intrinsic, - args, - &mut llargs, - cleanup::CustomScope(cleanup_scope)); - - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); - // These are the only intrinsic functions that diverge. if name == "abort" { let llfn = ccx.get_intrinsic(&("llvm.trap")); Call(bcx, llfn, &[], call_debug_location); - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); Unreachable(bcx); return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to())); } else if &name[..] == "unreachable" { - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); Unreachable(bcx); return Result::new(bcx, C_nil(ccx)); } let llret_ty = type_of::type_of(ccx, ret_ty); - // Get location to store the result. 
If the user does - // not care about the result, just make a stack slot - let llresult = match dest { - expr::SaveIn(d) => d, - expr::Ignore => { - if !type_is_zero_size(ccx, ret_ty) { - let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result"); - call_lifetime_start(bcx, llresult); - llresult - } else { - C_undef(llret_ty.ptr_to()) - } - } - }; - let simple = get_simple_intrinsic(ccx, &name); let llval = match (simple, &name[..]) { (Some(llfn), _) => { Call(bcx, llfn, &llargs, call_debug_location) } + (_, "likely") => { + let expect = ccx.get_intrinsic(&("llvm.expect.i1")); + Call(bcx, expect, &[llargs[0], C_bool(ccx, true)], call_debug_location) + } + (_, "unlikely") => { + let expect = ccx.get_intrinsic(&("llvm.expect.i1")); + Call(bcx, expect, &[llargs[0], C_bool(ccx, false)], call_debug_location) + } (_, "try") => { bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult, call_debug_location); @@ -347,12 +153,12 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, Call(bcx, llfn, &[], call_debug_location) } (_, "size_of") => { - let tp_ty = *substs.types.get(FnSpace, 0); + let tp_ty = substs.type_at(0); let lltp_ty = type_of::type_of(ccx, tp_ty); C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) } (_, "size_of_val") => { - let tp_ty = *substs.types.get(FnSpace, 0); + let tp_ty = substs.type_at(0); if !type_is_sized(tcx, tp_ty) { let (llsize, _) = glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]); @@ -363,11 +169,11 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } } (_, "min_align_of") => { - let tp_ty = *substs.types.get(FnSpace, 0); + let tp_ty = substs.type_at(0); C_uint(ccx, type_of::align_of(ccx, tp_ty)) } (_, "min_align_of_val") => { - let tp_ty = *substs.types.get(FnSpace, 0); + let tp_ty = substs.type_at(0); if !type_is_sized(tcx, tp_ty) { let (_, llalign) = glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]); @@ -377,41 +183,39 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } } (_, "pref_align_of") => { - let tp_ty = *substs.types.get(FnSpace, 0); + let tp_ty = substs.type_at(0); let lltp_ty = type_of::type_of(ccx, tp_ty); C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty)) } (_, "drop_in_place") => { - let tp_ty = *substs.types.get(FnSpace, 0); - let ptr = if type_is_sized(tcx, tp_ty) { + let tp_ty = substs.type_at(0); + let is_sized = type_is_sized(tcx, tp_ty); + let ptr = if is_sized { llargs[0] } else { - let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp"); - Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val)); - Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val)); - fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val); - scratch.val + // FIXME(#36457) -- we should pass unsized values as two arguments + let scratch = alloc_ty(bcx, tp_ty, "drop"); + call_lifetime_start(bcx, scratch); + Store(bcx, llargs[0], get_dataptr(bcx, scratch)); + Store(bcx, llargs[1], get_meta(bcx, scratch)); + scratch }; glue::drop_ty(bcx, ptr, tp_ty, call_debug_location); + if !is_sized { + call_lifetime_end(bcx, ptr); + } C_nil(ccx) } (_, "type_name") => { - let tp_ty = *substs.types.get(FnSpace, 0); + let tp_ty = substs.type_at(0); let ty_name = token::intern_and_get_ident(&tp_ty.to_string()); C_str_slice(ccx, ty_name) } (_, "type_id") => { - C_u64(ccx, ccx.tcx().type_id_hash(*substs.types.get(FnSpace, 0))) - } - (_, "init_dropped") => { - let tp_ty = *substs.types.get(FnSpace, 0); - if !type_is_zero_size(ccx, tp_ty) { - drop_done_fill_mem(bcx, llresult, 
tp_ty); - } - C_nil(ccx) + C_u64(ccx, ccx.tcx().type_id_hash(substs.type_at(0))) } (_, "init") => { - let tp_ty = *substs.types.get(FnSpace, 0); + let tp_ty = substs.type_at(0); if !type_is_zero_size(ccx, tp_ty) { // Just zero out the stack slot. (See comment on base::memzero for explanation) init_zero_mem(bcx, llresult, tp_ty); @@ -423,7 +227,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, C_nil(ccx) } (_, "needs_drop") => { - let tp_ty = *substs.types.get(FnSpace, 0); + let tp_ty = substs.type_at(0); C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty)) } @@ -442,7 +246,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, copy_intrinsic(bcx, false, false, - *substs.types.get(FnSpace, 0), + substs.type_at(0), llargs[1], llargs[0], llargs[2], @@ -452,7 +256,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, copy_intrinsic(bcx, true, false, - *substs.types.get(FnSpace, 0), + substs.type_at(0), llargs[1], llargs[0], llargs[2], @@ -461,7 +265,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, (_, "write_bytes") => { memset_intrinsic(bcx, false, - *substs.types.get(FnSpace, 0), + substs.type_at(0), llargs[0], llargs[1], llargs[2], @@ -472,7 +276,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, copy_intrinsic(bcx, false, true, - *substs.types.get(FnSpace, 0), + substs.type_at(0), llargs[0], llargs[1], llargs[2], @@ -482,7 +286,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, copy_intrinsic(bcx, true, true, - *substs.types.get(FnSpace, 0), + substs.type_at(0), llargs[0], llargs[1], llargs[2], @@ -491,14 +295,14 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, (_, "volatile_set_memory") => { memset_intrinsic(bcx, true, - *substs.types.get(FnSpace, 0), + substs.type_at(0), llargs[0], llargs[1], llargs[2], call_debug_location) } (_, "volatile_load") => { - let tp_ty = *substs.types.get(FnSpace, 0); + let tp_ty = substs.type_at(0); let mut ptr = llargs[0]; if let Some(ty) = fn_ty.ret.cast { ptr = PointerCast(bcx, ptr, ty.ptr_to()); @@ -510,10 +314,10 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, to_immediate(bcx, load, tp_ty) }, (_, "volatile_store") => { - let tp_ty = *substs.types.get(FnSpace, 0); + let tp_ty = substs.type_at(0); if type_is_fat_ptr(bcx.tcx(), tp_ty) { - VolatileStore(bcx, llargs[1], expr::get_dataptr(bcx, llargs[0])); - VolatileStore(bcx, llargs[2], expr::get_meta(bcx, llargs[0])); + VolatileStore(bcx, llargs[1], get_dataptr(bcx, llargs[0])); + VolatileStore(bcx, llargs[2], get_meta(bcx, llargs[0])); } else { let val = if fn_ty.args[1].is_indirect() { Load(bcx, llargs[1]) @@ -610,11 +414,10 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, }, (_, "discriminant_value") => { - let val_ty = substs.types.get(FnSpace, 0); + let val_ty = substs.type_at(0); match val_ty.sty { - ty::TyEnum(..) => { - let repr = adt::represent_type(ccx, *val_ty); - adt::trans_get_discr(bcx, &repr, llargs[0], + ty::TyAdt(adt, ..) 
if adt.is_enum() => { + adt::trans_get_discr(bcx, val_ty, llargs[0], Some(llret_ty), true) } _ => C_null(llret_ty) @@ -622,9 +425,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } (_, name) if name.starts_with("simd_") => { generic_simd_intrinsic(bcx, name, - substs, callee_ty, - expr_arguments, &llargs, ret_ty, llret_ty, call_debug_location, @@ -664,7 +465,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, match split[1] { "cxchg" | "cxchgweak" => { - let sty = &substs.types.get(FnSpace, 0).sty; + let sty = &substs.type_at(0).sty; if int_type_width_signed(sty, ccx).is_some() { let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False }; let val = AtomicCmpXchg(bcx, llargs[0], llargs[1], llargs[2], @@ -683,7 +484,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } "load" => { - let sty = &substs.types.get(FnSpace, 0).sty; + let sty = &substs.type_at(0).sty; if int_type_width_signed(sty, ccx).is_some() { AtomicLoad(bcx, llargs[0], order) } else { @@ -696,7 +497,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } "store" => { - let sty = &substs.types.get(FnSpace, 0).sty; + let sty = &substs.type_at(0).sty; if int_type_width_signed(sty, ccx).is_some() { AtomicStore(bcx, llargs[1], llargs[0], order); } else { @@ -735,7 +536,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, _ => ccx.sess().fatal("unknown atomic operation") }; - let sty = &substs.types.get(FnSpace, 0).sty; + let sty = &substs.type_at(0).sty; if int_type_width_signed(sty, ccx).is_some() { AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order) } else { @@ -750,7 +551,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } - (_, _) => { + (..) => { let intr = match Intrinsic::find(&name) { Some(intr) => intr, None => bug!("unknown intrinsic '{}'", name), @@ -826,13 +627,10 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, // destructors, and the contents are SIMD // etc. 
assert!(!bcx.fcx.type_needs_drop(arg_type)); - - let repr = adt::represent_type(bcx.ccx(), arg_type); - let repr_ptr = &repr; let arg = adt::MaybeSizedValue::sized(llarg); (0..contents.len()) .map(|i| { - Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i)) + Load(bcx, adt::trans_field_ptr(bcx, arg_type, arg, Disr(0), i)) }) .collect() } @@ -869,13 +667,13 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let llargs = if !any_changes_needed { // no aggregates to flatten, so no change needed - llargs + llargs.to_vec() } else { // there are some aggregates that need to be flattened // in the LLVM call, so we need to run over the types // again to find them and extract the arguments intr.inputs.iter() - .zip(&llargs) + .zip(llargs) .zip(&arg_tys) .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg)) .collect() @@ -920,17 +718,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } } - // If we made a temporary stack slot, let's clean it up - match dest { - expr::Ignore => { - bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location); - call_lifetime_end(bcx, llresult); - } - expr::SaveIn(_) => {} - } - - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - Result::new(bcx, llresult) } @@ -1065,10 +852,10 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, SetPersonalityFn(bcx, bcx.fcx.eh_personality()); - let normal = bcx.fcx.new_temp_block("normal"); - let catchswitch = bcx.fcx.new_temp_block("catchswitch"); - let catchpad = bcx.fcx.new_temp_block("catchpad"); - let caught = bcx.fcx.new_temp_block("caught"); + let normal = bcx.fcx.new_block("normal"); + let catchswitch = bcx.fcx.new_block("catchswitch"); + let catchpad = bcx.fcx.new_block("catchpad"); + let caught = bcx.fcx.new_block("caught"); let func = llvm::get_param(bcx.fcx.llfn, 0); let data = llvm::get_param(bcx.fcx.llfn, 1); @@ -1124,7 +911,7 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let tcx = ccx.tcx(); let tydesc = match tcx.lang_items.msvc_try_filter() { - Some(did) => ::consts::get_static(ccx, did).to_llref(), + Some(did) => ::consts::get_static(ccx, did), None => bug!("msvc_try_filter not defined"), }; let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]); @@ -1185,8 +972,8 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // expected to be `*mut *mut u8` for this to actually work, but that's // managed by the standard library. 
- let then = bcx.fcx.new_temp_block("then"); - let catch = bcx.fcx.new_temp_block("catch"); + let then = bcx.fcx.new_block("then"); + let catch = bcx.fcx.new_block("catch"); let func = llvm::get_param(bcx.fcx.llfn, 0); let data = llvm::get_param(bcx.fcx.llfn, 1); @@ -1241,8 +1028,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, let (fcx, block_arena); block_arena = TypedArena::new(); fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena); - let bcx = fcx.init(true, None); - trans(bcx); + trans(fcx.init(true)); fcx.cleanup(); llfn } @@ -1284,9 +1070,7 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) { fn generic_simd_intrinsic<'blk, 'tcx, 'a> (bcx: Block<'blk, 'tcx>, name: &str, - substs: &'tcx subst::Substs<'tcx>, callee_ty: Ty<'tcx>, - args: Option<&[P]>, llargs: &[ValueRef], ret_ty: Ty<'tcx>, llret_ty: Type, @@ -1323,8 +1107,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a> let tcx = bcx.tcx(); - let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig()); - let sig = tcx.normalize_associated_type(&sig); + let sig = tcx.erase_late_bound_regions_and_normalize(callee_ty.fn_sig()); let arg_tys = sig.inputs; // every intrinsic takes a SIMD vector as its first argument @@ -1387,20 +1170,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a> let total_len = in_len as u64 * 2; - let vector = match args { - Some(args) => { - match consts::const_expr(bcx.ccx(), &args[2], substs, None, - // this should probably help simd error reporting - consts::TrueConst::Yes) { - Ok((vector, _)) => vector, - Err(err) => { - fatal_const_eval_err(bcx.tcx(), err.as_inner(), span, - "shuffle indices"); - } - } - } - None => llargs[2] - }; + let vector = llargs[2]; let indices: Option> = (0..n) .map(|i| { diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 81a1dbeb7f..81c0c184f6 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -27,6 +27,7 @@ #![feature(box_syntax)] #![feature(const_fn)] #![feature(custom_attribute)] +#![feature(dotdot_in_tuple_patterns)] #![allow(unused_attributes)] #![feature(libc)] #![feature(quote)] @@ -35,7 +36,7 @@ #![feature(slice_patterns)] #![feature(staged_api)] #![feature(unicode)] -#![feature(question_mark)] +#![cfg_attr(stage0, feature(question_mark))] use rustc::dep_graph::WorkProduct; @@ -68,7 +69,6 @@ pub use base::trans_crate; pub use disr::Disr; pub mod back { - pub use rustc_back::rpath; pub use rustc::hir::svh; pub mod archive; @@ -78,6 +78,7 @@ pub mod back { pub mod symbol_names; pub mod write; pub mod msvc; + pub mod rpath; } pub mod diagnostics; @@ -98,8 +99,10 @@ mod cabi_aarch64; mod cabi_arm; mod cabi_asmjs; mod cabi_mips; +mod cabi_mips64; mod cabi_powerpc; mod cabi_powerpc64; +mod cabi_s390x; mod cabi_x86; mod cabi_x86_64; mod cabi_x86_win64; @@ -110,17 +113,12 @@ mod collector; mod common; mod consts; mod context; -mod controlflow; -mod datum; mod debuginfo; mod declare; mod disr; -mod expr; mod glue; -mod inline; mod intrinsic; mod machine; -mod _match; mod meth; mod mir; mod monomorphize; diff --git a/src/librustc_trans/machine.rs b/src/librustc_trans/machine.rs index 59020b38dd..cd31f02842 100644 --- a/src/librustc_trans/machine.rs +++ b/src/librustc_trans/machine.rs @@ -24,13 +24,6 @@ pub type llalign = u32; // ______________________________________________________________________ // compute sizeof / alignof -// Returns the number of bytes clobbered by a Store to this type. 
-pub fn llsize_of_store(cx: &CrateContext, ty: Type) -> llsize { - unsafe { - return llvm::LLVMStoreSizeOfType(cx.td(), ty.to_ref()); - } -} - // Returns the number of bytes between successive elements of type T in an // array of T. This is the "ABI" size. It includes any ABI-mandated padding. pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> llsize { @@ -39,28 +32,6 @@ pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> llsize { } } -// Returns, as near as we can figure, the "real" size of a type. As in, the -// bits in this number of bytes actually carry data related to the datum -// with the type. Not junk, accidentally-damaged words, or whatever. -// Note that padding of the type will be included for structs, but not for the -// other types (i.e. SIMD types). -// Rounds up to the nearest byte though, so if you have a 1-bit -// value, we return 1 here, not 0. Most of rustc works in bytes. Be warned -// that LLVM *does* distinguish between e.g. a 1-bit value and an 8-bit value -// at the codegen level! In general you should prefer `llbitsize_of_real` -// below. -pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> llsize { - unsafe { - let nbits = llvm::LLVMSizeOfTypeInBits(cx.td(), ty.to_ref()); - if nbits & 7 != 0 { - // Not an even number of bytes, spills into "next" byte. - 1 + (nbits >> 3) - } else { - nbits >> 3 - } - } -} - /// Returns the "real" size of the type in bits. pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> llbits { unsafe { diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index 3d6093d4d6..dac70d4a1d 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -8,35 +8,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::rc::Rc; - use attributes; use arena::TypedArena; -use back::symbol_names; use llvm::{ValueRef, get_params}; -use rustc::hir::def_id::DefId; -use rustc::ty::subst::{FnSpace, Subst, Substs}; -use rustc::ty::subst; -use rustc::traits::{self, Reveal}; +use rustc::traits; use abi::FnType; use base::*; use build::*; -use callee::{Callee, Virtual, ArgVals, trans_fn_pointer_shim}; -use closure; +use callee::Callee; use common::*; use consts; use debuginfo::DebugLoc; use declare; -use expr; use glue; use machine; +use monomorphize::Instance; use type_::Type; use type_of::*; use value::Value; -use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; - -use syntax::ast::Name; -use syntax_pos::DUMMY_SP; +use rustc::ty; // drop_glue pointer, size, align. const VTABLE_OFFSET: usize = 3; @@ -75,56 +65,47 @@ pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, /// In fact, all virtual calls can be thought of as normal trait calls /// that go through this shim function. 
pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, - method_ty: Ty<'tcx>, - vtable_index: usize) + callee: Callee<'tcx>) -> ValueRef { let _icx = push_ctxt("trans_object_shim"); let tcx = ccx.tcx(); - debug!("trans_object_shim(vtable_index={}, method_ty={:?})", - vtable_index, - method_ty); + debug!("trans_object_shim({:?})", callee); + + let (sig, abi, function_name) = match callee.ty.sty { + ty::TyFnDef(def_id, substs, f) => { + let instance = Instance::new(def_id, substs); + (&f.sig, f.abi, instance.symbol_name(ccx.shared())) + } + _ => bug!() + }; - let sig = tcx.erase_late_bound_regions(&method_ty.fn_sig()); - let sig = tcx.normalize_associated_type(&sig); - let fn_ty = FnType::new(ccx, method_ty.fn_abi(), &sig, &[]); + let sig = tcx.erase_late_bound_regions_and_normalize(sig); + let fn_ty = FnType::new(ccx, abi, &sig, &[]); - let function_name = - symbol_names::internal_name_from_type_and_suffix(ccx, method_ty, "object_shim"); - let llfn = declare::define_internal_fn(ccx, &function_name, method_ty); + let llfn = declare::define_internal_fn(ccx, &function_name, callee.ty); attributes::set_frame_pointer_elimination(ccx, llfn); let (block_arena, fcx): (TypedArena<_>, FunctionContext); block_arena = TypedArena::new(); fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena); - let mut bcx = fcx.init(false, None); - assert!(!fcx.needs_ret_allocas); - - - let dest = - fcx.llretslotptr.get().map( - |_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))); - - debug!("trans_object_shim: method_offset_in_vtable={}", - vtable_index); + let mut bcx = fcx.init(false); + let dest = fcx.llretslotptr.get(); let llargs = get_params(fcx.llfn); - let args = ArgVals(&llargs[fcx.fn_ty.ret.is_indirect() as usize..]); - - let callee = Callee { - data: Virtual(vtable_index), - ty: method_ty - }; - bcx = callee.call(bcx, DebugLoc::None, args, dest).bcx; + bcx = callee.call(bcx, DebugLoc::None, + &llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).bcx; fcx.finish(bcx, DebugLoc::None); llfn } -/// Creates a returns a dynamic vtable for the given type and vtable origin. +/// Creates a dynamic vtable for the given type and vtable origin. /// This is used only for objects. /// +/// The vtables are cached instead of created on every call. +/// /// The `trait_ref` encodes the erased self type. Hence if we are /// making an object `Foo` from a value of type `Foo`, then /// `trait_ref` would map `T:Trait`. @@ -144,72 +125,23 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } // Not in the cache. Build it. - let methods = traits::supertraits(tcx, trait_ref.clone()).flat_map(|trait_ref| { - let vtable = fulfill_obligation(ccx.shared(), DUMMY_SP, trait_ref.clone()); - match vtable { - // Should default trait error here? 
- traits::VtableDefaultImpl(_) | - traits::VtableBuiltin(_) => { - Vec::new().into_iter() - } - traits::VtableImpl( - traits::VtableImplData { - impl_def_id: id, - substs, - nested: _ }) => { - let nullptr = C_null(Type::nil(ccx).ptr_to()); - get_vtable_methods(tcx, id, substs) - .into_iter() - .map(|opt_mth| opt_mth.map_or(nullptr, |mth| { - Callee::def(ccx, mth.method.def_id, &mth.substs).reify(ccx).val - })) - .collect::>() - .into_iter() - } - traits::VtableClosure( - traits::VtableClosureData { - closure_def_id, - substs, - nested: _ }) => { - let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_ref.def_id()).unwrap(); - let llfn = closure::trans_closure_method(ccx, - closure_def_id, - substs, - trait_closure_kind); - vec![llfn].into_iter() - } - traits::VtableFnPointer( - traits::VtableFnPointerData { - fn_ty: bare_fn_ty, - nested: _ }) => { - let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_ref.def_id()).unwrap(); - vec![trans_fn_pointer_shim(ccx, trait_closure_kind, bare_fn_ty)].into_iter() - } - traits::VtableObject(ref data) => { - // this would imply that the Self type being erased is - // an object type; this cannot happen because we - // cannot cast an unsized type into a trait object - bug!("cannot get vtable for an object type: {:?}", - data); - } - traits::VtableParam(..) => { - bug!("resolved vtable for {:?} to bad vtable {:?} in trans", - trait_ref, - vtable); - } - } + let nullptr = C_null(Type::nil(ccx).ptr_to()); + let methods = traits::get_vtable_methods(tcx, trait_ref).map(|opt_mth| { + opt_mth.map_or(nullptr, |(def_id, substs)| { + Callee::def(ccx, def_id, substs).reify(ccx) + }) }); let size_ty = sizing_type_of(ccx, trait_ref.self_ty()); let size = machine::llsize_of_alloc(ccx, size_ty); let align = align_of(ccx, trait_ref.self_ty()); - let components: Vec<_> = vec![ + let components: Vec<_> = [ // Generate a destructor for the vtable. glue::get_drop_glue(ccx, trait_ref.self_ty()), C_uint(ccx, size), C_uint(ccx, align) - ].into_iter().chain(methods).collect(); + ].iter().cloned().chain(methods).collect(); let vtable_const = C_struct(ccx, &components, false); let align = machine::llalign_of_pref(ccx, val_ty(vtable_const)); @@ -218,126 +150,3 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ccx.vtables().borrow_mut().insert(trait_ref, vtable); vtable } - -pub fn get_vtable_methods<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - impl_id: DefId, - substs: &'tcx subst::Substs<'tcx>) - -> Vec>> -{ - debug!("get_vtable_methods(impl_id={:?}, substs={:?}", impl_id, substs); - - let trt_id = match tcx.impl_trait_ref(impl_id) { - Some(t_id) => t_id.def_id, - None => bug!("make_impl_vtable: don't know how to \ - make a vtable for a type impl!") - }; - - tcx.populate_implementations_for_trait_if_necessary(trt_id); - - let trait_item_def_ids = tcx.trait_item_def_ids(trt_id); - trait_item_def_ids - .iter() - - // Filter out non-method items. - .filter_map(|item_def_id| { - match *item_def_id { - ty::MethodTraitItemId(def_id) => Some(def_id), - _ => None, - } - }) - - // Now produce pointers for each remaining method. If the - // method could never be called from this object, just supply - // null. 
- .map(|trait_method_def_id| { - debug!("get_vtable_methods: trait_method_def_id={:?}", - trait_method_def_id); - - let trait_method_type = match tcx.impl_or_trait_item(trait_method_def_id) { - ty::MethodTraitItem(m) => m, - _ => bug!("should be a method, not other assoc item"), - }; - let name = trait_method_type.name; - - // Some methods cannot be called on an object; skip those. - if !tcx.is_vtable_safe_method(trt_id, &trait_method_type) { - debug!("get_vtable_methods: not vtable safe"); - return None; - } - - debug!("get_vtable_methods: trait_method_type={:?}", - trait_method_type); - - // the method may have some early-bound lifetimes, add - // regions for those - let num_dummy_regions = trait_method_type.generics.regions.len(FnSpace); - let dummy_regions = vec![ty::ReErased; num_dummy_regions]; - let method_substs = substs.clone() - .with_method(vec![], dummy_regions); - let method_substs = tcx.mk_substs(method_substs); - - // The substitutions we have are on the impl, so we grab - // the method type from the impl to substitute into. - let mth = get_impl_method(tcx, impl_id, method_substs, name); - - debug!("get_vtable_methods: mth={:?}", mth); - - // If this is a default method, it's possible that it - // relies on where clauses that do not hold for this - // particular set of type parameters. Note that this - // method could then never be called, so we do not want to - // try and trans it, in that case. Issue #23435. - if mth.is_provided { - let predicates = mth.method.predicates.predicates.subst(tcx, &mth.substs); - if !normalize_and_test_predicates(tcx, predicates.into_vec()) { - debug!("get_vtable_methods: predicates do not hold"); - return None; - } - } - - Some(mth) - }) - .collect() -} - -#[derive(Debug)] -pub struct ImplMethod<'tcx> { - pub method: Rc>, - pub substs: &'tcx Substs<'tcx>, - pub is_provided: bool -} - -/// Locates the applicable definition of a method, given its name. 
-pub fn get_impl_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - impl_def_id: DefId, - substs: &'tcx Substs<'tcx>, - name: Name) - -> ImplMethod<'tcx> -{ - assert!(!substs.types.needs_infer()); - - let trait_def_id = tcx.trait_id_of_impl(impl_def_id).unwrap(); - let trait_def = tcx.lookup_trait_def(trait_def_id); - - match trait_def.ancestors(impl_def_id).fn_defs(tcx, name).next() { - Some(node_item) => { - let substs = tcx.normalizing_infer_ctxt(Reveal::All).enter(|infcx| { - let substs = traits::translate_substs(&infcx, impl_def_id, - substs, node_item.node); - tcx.lift(&substs).unwrap_or_else(|| { - bug!("trans::meth::get_impl_method: translate_substs \ - returned {:?} which contains inference types/regions", - substs); - }) - }); - ImplMethod { - method: node_item.item, - substs: substs, - is_provided: node_item.node.is_from_trait(), - } - } - None => { - bug!("method {:?} not found in {:?}", name, impl_def_id) - } - } -} diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 66eb78aef0..5de59b9f6b 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -15,6 +15,7 @@ use rustc_data_structures::bitvec::BitVector; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc::mir::repr as mir; use rustc::mir::repr::TerminatorKind; +use rustc::mir::repr::Location; use rustc::mir::visit::{Visitor, LvalueContext}; use rustc::mir::traversal; use common::{self, Block, BlockAndBuilder}; @@ -104,7 +105,8 @@ impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> { fn visit_assign(&mut self, block: mir::BasicBlock, lvalue: &mir::Lvalue<'tcx>, - rvalue: &mir::Rvalue<'tcx>) { + rvalue: &mir::Rvalue<'tcx>, + location: Location) { debug!("visit_assign(block={:?}, lvalue={:?}, rvalue={:?})", block, lvalue, rvalue); if let Some(index) = self.mir.local_index(lvalue) { @@ -113,15 +115,16 @@ impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> { self.mark_as_lvalue(index); } } else { - self.visit_lvalue(lvalue, LvalueContext::Store); + self.visit_lvalue(lvalue, LvalueContext::Store, location); } - self.visit_rvalue(rvalue); + self.visit_rvalue(rvalue, location); } fn visit_terminator_kind(&mut self, block: mir::BasicBlock, - kind: &mir::TerminatorKind<'tcx>) { + kind: &mir::TerminatorKind<'tcx>, + location: Location) { match *kind { mir::TerminatorKind::Call { func: mir::Operand::Constant(mir::Constant { @@ -133,18 +136,19 @@ impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> { // is not guaranteed to be statically dominated by the // definition of x, so x must always be in an alloca. if let mir::Operand::Consume(ref lvalue) = args[0] { - self.visit_lvalue(lvalue, LvalueContext::Drop); + self.visit_lvalue(lvalue, LvalueContext::Drop, location); } } _ => {} } - self.super_terminator_kind(block, kind); + self.super_terminator_kind(block, kind, location); } fn visit_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>, - context: LvalueContext) { + context: LvalueContext<'tcx>, + location: Location) { debug!("visit_lvalue(lvalue={:?}, context={:?})", lvalue, context); // Allow uses of projections of immediate pair fields. @@ -176,8 +180,7 @@ impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> { LvalueContext::Store | LvalueContext::Inspect | LvalueContext::Borrow { .. } | - LvalueContext::Slice { .. } | - LvalueContext::Projection => { + LvalueContext::Projection(..) 
=> { self.mark_as_lvalue(index); } @@ -196,11 +199,11 @@ impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> { // A deref projection only reads the pointer, never needs the lvalue. if let mir::Lvalue::Projection(ref proj) = *lvalue { if let mir::ProjectionElem::Deref = proj.elem { - return self.visit_lvalue(&proj.base, LvalueContext::Consume); + return self.visit_lvalue(&proj.base, LvalueContext::Consume, location); } } - self.super_lvalue(lvalue, context); + self.super_lvalue(lvalue, context, location); } } diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 6d03b44744..83b3880c38 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -9,7 +9,7 @@ // except according to those terms. use llvm::{self, ValueRef}; -use rustc_const_eval::ErrKind; +use rustc_const_eval::{ErrKind, ConstEvalErr, note_const_eval_err}; use rustc::middle::lang_items; use rustc::ty; use rustc::mir::repr as mir; @@ -23,7 +23,6 @@ use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef}; use consts; use debuginfo::DebugLoc; use Disr; -use expr; use machine::{llalign_of_min, llbitsize_of_real}; use meth; use type_of; @@ -79,7 +78,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { debug!("llblock: creating cleanup trampoline for {:?}", target); let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); - let trampoline = this.fcx.new_block(name, None).build(); + let trampoline = this.fcx.new_block(name).build(); trampoline.set_personality_fn(this.fcx.eh_personality()); trampoline.cleanup_ret(cp, Some(lltarget)); trampoline.llbb() @@ -140,9 +139,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => { let discr_lvalue = self.trans_lvalue(&bcx, discr); let ty = discr_lvalue.ty.to_ty(bcx.tcx()); - let repr = adt::represent_type(bcx.ccx(), ty); let discr = bcx.with_block(|bcx| - adt::trans_get_discr(bcx, &repr, discr_lvalue.llval, None, true) + adt::trans_get_discr(bcx, ty, discr_lvalue.llval, None, true) ); let mut bb_hist = FnvHashMap(); @@ -168,7 +166,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { if default_bb != Some(target) { let llbb = llblock(self, target); let llval = bcx.with_block(|bcx| adt::trans_case( - bcx, &repr, Disr::from(adt_variant.disr_val))); + bcx, ty, Disr::from(adt_variant.disr_val))); build::AddCase(switch, llval, llbb) } } @@ -261,8 +259,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { bcx.with_block(|bcx| { let scratch = base::alloc_ty(bcx, ty, "drop"); base::call_lifetime_start(bcx, scratch); - build::Store(bcx, lvalue.llval, expr::get_dataptr(bcx, scratch)); - build::Store(bcx, lvalue.llextra, expr::get_meta(bcx, scratch)); + build::Store(bcx, lvalue.llval, base::get_dataptr(bcx, scratch)); + build::Store(bcx, lvalue.llextra, base::get_meta(bcx, scratch)); scratch }) }; @@ -310,7 +308,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // Create the failure block and the conditional branch to it. let lltarget = llblock(self, target); - let panic_block = self.fcx.new_block("panic", None); + let panic_block = self.fcx.new_block("panic"); if expected { bcx.cond_br(cond, lltarget, panic_block.llbb); } else { @@ -373,9 +371,11 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // is also constant, then we can produce a warning. 
if const_cond == Some(!expected) { if let Some(err) = const_err { - let _ = consts::const_err(bcx.ccx(), span, - Err::<(), _>(err), - consts::TrueConst::No); + let err = ConstEvalErr{ span: span, kind: err }; + let mut diag = bcx.tcx().sess.struct_span_warn( + span, "this expression will panic at run-time"); + note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag); + diag.emit(); } } @@ -383,7 +383,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item); let callee = Callee::def(bcx.ccx(), def_id, bcx.ccx().empty_substs_for_def_id(def_id)); - let llfn = callee.reify(bcx.ccx()).val; + let llfn = callee.reify(bcx.ccx()); // Translate the actual panic invoke/call. if let Some(unwind) = cleanup { @@ -419,11 +419,11 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { _ => bug!("{} is not callable", callee.ty) }; - let sig = bcx.tcx().erase_late_bound_regions(sig); + let sig = bcx.tcx().erase_late_bound_regions_and_normalize(sig); // Handle intrinsics old trans wants Expr's for, ourselves. let intrinsic = match (&callee.ty.sty, &callee.data) { - (&ty::TyFnDef(def_id, _, _), &Intrinsic) => { + (&ty::TyFnDef(def_id, ..), &Intrinsic) => { Some(bcx.tcx().item_name(def_id).as_str()) } _ => None @@ -516,28 +516,27 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let fn_ptr = match callee.data { NamedTupleConstructor(_) => { // FIXME translate this like mir::Rvalue::Aggregate. - callee.reify(bcx.ccx()).val + callee.reify(bcx.ccx()) } Intrinsic => { - use callee::ArgVals; - use expr::{Ignore, SaveIn}; use intrinsic::trans_intrinsic_call; let (dest, llargs) = match ret_dest { _ if fn_ty.ret.is_indirect() => { - (SaveIn(llargs[0]), &llargs[1..]) + (llargs[0], &llargs[1..]) + } + ReturnDest::Nothing => { + (C_undef(fn_ty.ret.original_ty.ptr_to()), &llargs[..]) } - ReturnDest::Nothing => (Ignore, &llargs[..]), ReturnDest::IndirectOperand(dst, _) | - ReturnDest::Store(dst) => (SaveIn(dst), &llargs[..]), + ReturnDest::Store(dst) => (dst, &llargs[..]), ReturnDest::DirectOperand(_) => bug!("Cannot use direct operand with an intrinsic call") }; bcx.with_block(|bcx| { trans_intrinsic_call(bcx, callee.ty, &fn_ty, - ArgVals(llargs), dest, - debug_loc); + &llargs, dest, debug_loc); }); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { @@ -701,10 +700,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // Handle both by-ref and immediate tuples. 
match tuple.val { Ref(llval) => { - let base_repr = adt::represent_type(bcx.ccx(), tuple.ty); let base = adt::MaybeSizedValue::sized(llval); for (n, &ty) in arg_types.iter().enumerate() { - let ptr = adt::trans_field_ptr_builder(bcx, &base_repr, base, Disr(0), n); + let ptr = adt::trans_field_ptr_builder(bcx, tuple.ty, base, Disr(0), n); let val = if common::type_is_fat_ptr(bcx.tcx(), ty) { let (lldata, llextra) = load_fat_ptr(bcx, ptr); Pair(lldata, llextra) @@ -785,7 +783,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let target = self.bcx(target_bb); - let block = self.fcx.new_block("cleanup", None); + let block = self.fcx.new_block("cleanup"); self.landing_pads[target_bb] = Some(block); let bcx = block.build(); @@ -828,7 +826,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> { self.unreachable_block.unwrap_or_else(|| { - let bl = self.fcx.new_block("unreachable", None); + let bl = self.fcx.new_block("unreachable"); bl.build().unreachable(); self.unreachable_block = Some(bl); bl @@ -897,10 +895,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { if out_type_size != 0 { // FIXME #19925 Remove this hack after a release cycle. let f = Callee::def(bcx.ccx(), def_id, substs); - let datum = f.reify(bcx.ccx()); + let ty = match f.ty.sty { + ty::TyFnDef(.., f) => bcx.tcx().mk_fn_ptr(f), + _ => f.ty + }; val = OperandRef { - val: Immediate(datum.val), - ty: datum.ty + val: Immediate(f.reify(bcx.ccx())), + ty: ty }; } } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 2c83b7d07c..b74d56ce36 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -10,10 +10,10 @@ use llvm::{self, ValueRef}; use rustc::middle::const_val::ConstVal; -use rustc_const_eval::ErrKind; +use rustc_const_eval::{ErrKind, ConstEvalErr, report_const_eval_err}; use rustc_const_math::ConstInt::*; use rustc_const_math::ConstFloat::*; -use rustc_const_math::ConstMathErr; +use rustc_const_math::{ConstInt, ConstIsize, ConstUsize, ConstMathErr}; use rustc::hir::def_id::DefId; use rustc::infer::TransNormalize; use rustc::mir::repr as mir; @@ -23,17 +23,19 @@ use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::subst::Substs; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; -use {abi, adt, base, Disr}; +use {abi, adt, base, Disr, machine}; use callee::Callee; -use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty}; +use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty, type_is_sized}; use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral}; use common::{C_null, C_struct, C_str_slice, C_undef, C_uint}; -use consts::{self, ConstEvalFailure, TrueConst, to_const_int}; +use common::{const_to_opt_int, const_to_opt_uint}; +use consts; use monomorphize::{self, Instance}; use type_of; use type_::Type; use value::Value; +use syntax::ast; use syntax_pos::{Span, DUMMY_SP}; use std::fmt; @@ -237,22 +239,23 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { fn trans_def(ccx: &'a CrateContext<'a, 'tcx>, mut instance: Instance<'tcx>, args: IndexVec>) - -> Result, ConstEvalFailure> { + -> Result, ConstEvalErr> { // Try to resolve associated constants. - if instance.substs.self_ty().is_some() { - // Only trait items can have a Self parameter. 
- let trait_item = ccx.tcx().impl_or_trait_item(instance.def); - let trait_id = trait_item.container().id(); - let substs = instance.substs; - let trait_ref = ty::Binder(substs.to_trait_ref(ccx.tcx(), trait_id)); + if let Some(trait_id) = ccx.tcx().trait_of_item(instance.def) { + let trait_ref = ty::TraitRef::new(trait_id, instance.substs); + let trait_ref = ty::Binder(trait_ref); let vtable = common::fulfill_obligation(ccx.shared(), DUMMY_SP, trait_ref); if let traits::VtableImpl(vtable_impl) = vtable { let name = ccx.tcx().item_name(instance.def); - for ac in ccx.tcx().associated_consts(vtable_impl.impl_def_id) { - if ac.name == name { - instance = Instance::new(ac.def_id, vtable_impl.substs); - break; - } + let ac = ccx.tcx().impl_or_trait_items(vtable_impl.impl_def_id) + .iter().filter_map(|&def_id| { + match ccx.tcx().impl_or_trait_item(def_id) { + ty::ConstTraitItem(ac) => Some(ac), + _ => None + } + }).find(|ic| ic.name == name); + if let Some(ac) = ac { + instance = Instance::new(ac.def_id, vtable_impl.substs); } } } @@ -266,12 +269,12 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { fn monomorphize(&self, value: &T) -> T where T: TransNormalize<'tcx> { - monomorphize::apply_param_substs(self.ccx.tcx(), + monomorphize::apply_param_substs(self.ccx.shared(), self.substs, value) } - fn trans(&mut self) -> Result, ConstEvalFailure> { + fn trans(&mut self) -> Result, ConstEvalErr> { let tcx = self.ccx.tcx(); let mut bb = mir::START_BLOCK; @@ -293,7 +296,8 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } } mir::StatementKind::StorageLive(_) | - mir::StatementKind::StorageDead(_) => {} + mir::StatementKind::StorageDead(_) | + mir::StatementKind::Nop => {} mir::StatementKind::SetDiscriminant{ .. } => { span_bug!(span, "SetDiscriminant should not appear in constants?"); } @@ -330,10 +334,10 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { ErrKind::Math(err.clone()) } }; - match consts::const_err(self.ccx, span, Err(err), TrueConst::Yes) { - Ok(()) => {} - Err(err) => if failure.is_ok() { failure = Err(err); } - } + + let err = ConstEvalErr{ span: span, kind: err }; + report_const_eval_err(tcx, &err, span, "expression").emit(); + failure = Err(err); } target } @@ -380,7 +384,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span) - -> Result, ConstEvalFailure> { + -> Result, ConstEvalErr> { let tcx = self.ccx.tcx(); if let Some(index) = self.mir.local_index(lvalue) { @@ -396,7 +400,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::Lvalue::ReturnPointer => bug!(), // handled above mir::Lvalue::Static(def_id) => { ConstLvalue { - base: Base::Static(consts::get_static(self.ccx, def_id).val), + base: Base::Static(consts::get_static(self.ccx, def_id)), llextra: ptr::null_mut(), ty: lvalue.ty(self.mir, tcx).to_ty(tcx) } @@ -421,17 +425,23 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } else if let ty::TyStr = projected_ty.sty { (Base::Str(base), extra) } else { - let val = consts::load_const(self.ccx, base, projected_ty); + let v = base; + let v = self.ccx.const_unsized().borrow().get(&v).map_or(v, |&v| v); + let mut val = unsafe { llvm::LLVMGetInitializer(v) }; if val.is_null() { span_bug!(span, "dereference of non-constant pointer `{:?}`", Value(base)); } + if projected_ty.is_bool() { + unsafe { + val = llvm::LLVMConstTrunc(val, Type::i1(self.ccx).to_ref()); + } + } (Base::Value(val), extra) } } mir::ProjectionElem::Field(ref field, _) => { - let base_repr = adt::represent_type(self.ccx, tr_base.ty); - let llprojected = 
adt::const_get_field(&base_repr, base.llval, + let llprojected = adt::const_get_field(self.ccx, tr_base.ty, base.llval, Disr(0), field.index()); let llextra = if is_sized { ptr::null_mut() @@ -472,7 +482,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } fn const_operand(&self, operand: &mir::Operand<'tcx>, span: Span) - -> Result, ConstEvalFailure> { + -> Result, ConstEvalErr> { debug!("const_operand({:?} @ {:?})", operand, span); let result = match *operand { mir::Operand::Consume(ref lvalue) => { @@ -527,7 +537,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, dest_ty: Ty<'tcx>, span: Span) - -> Result, ConstEvalFailure> { + -> Result, ConstEvalErr> { let tcx = self.ccx.tcx(); debug!("const_rvalue({:?}: {:?} @ {:?})", rvalue, dest_ty, span); let val = match *rvalue { @@ -569,14 +579,13 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::AggregateKind::Closure(..) | mir::AggregateKind::Tuple => { let disr = match *kind { - mir::AggregateKind::Adt(adt_def, index, _) => { + mir::AggregateKind::Adt(adt_def, index, _, _) => { Disr::from(adt_def.variants[index].disr_val) } _ => Disr(0) }; - let repr = adt::represent_type(self.ccx, dest_ty); Const::new( - adt::trans_const(self.ccx, &repr, disr, &fields), + adt::trans_const(self.ccx, dest_ty, disr, &fields), dest_ty ) } @@ -592,7 +601,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { match operand.ty.sty { ty::TyFnDef(def_id, substs, _) => { Callee::def(self.ccx, def_id, substs) - .reify(self.ccx).val + .reify(self.ccx) } _ => { span_bug!(span, "{} cannot be reified to a fn ptr", @@ -647,8 +656,8 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let ll_t_out = type_of::immediate_type_of(self.ccx, cast_ty); let llval = operand.llval; let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in { - let repr = adt::represent_type(self.ccx, operand.ty); - adt::is_discr_signed(&repr) + let l = self.ccx.layout_of(operand.ty); + adt::is_discr_signed(&l) } else { operand.ty.is_signed() }; @@ -724,7 +733,12 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let base = match tr_lvalue.base { Base::Value(llval) => { - let align = type_of::align_of(self.ccx, ty); + // FIXME: may be wrong for &*(&simd_vec as &fmt::Debug) + let align = if type_is_sized(self.ccx.tcx(), ty) { + type_of::align_of(self.ccx, ty) + } else { + self.ccx.tcx().data_layout.pointer_align.abi() as machine::llalign + }; if bk == mir::BorrowKind::Mut { consts::addr_of_mut(self.ccx, llval, align, "ref_mut") } else { @@ -811,6 +825,54 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } +fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option { + match t.sty { + ty::TyInt(int_type) => const_to_opt_int(value).and_then(|input| match int_type { + ast::IntTy::I8 => { + assert_eq!(input as i8 as i64, input); + Some(ConstInt::I8(input as i8)) + }, + ast::IntTy::I16 => { + assert_eq!(input as i16 as i64, input); + Some(ConstInt::I16(input as i16)) + }, + ast::IntTy::I32 => { + assert_eq!(input as i32 as i64, input); + Some(ConstInt::I32(input as i32)) + }, + ast::IntTy::I64 => { + Some(ConstInt::I64(input)) + }, + ast::IntTy::Is => { + ConstIsize::new(input, tcx.sess.target.int_type) + .ok().map(ConstInt::Isize) + }, + }), + ty::TyUint(uint_type) => const_to_opt_uint(value).and_then(|input| match uint_type { + ast::UintTy::U8 => { + assert_eq!(input as u8 as u64, input); + Some(ConstInt::U8(input as u8)) + }, + ast::UintTy::U16 => { + assert_eq!(input as u16 as u64, input); + Some(ConstInt::U16(input as u16)) + }, + ast::UintTy::U32 => { + assert_eq!(input as u32 as 
u64, input); + Some(ConstInt::U32(input as u32)) + }, + ast::UintTy::U64 => { + Some(ConstInt::U64(input)) + }, + ast::UintTy::Us => { + ConstUsize::new(input, tcx.sess.target.uint_type) + .ok().map(ConstInt::Usize) + }, + }), + _ => None, + } +} + pub fn const_scalar_binop(op: mir::BinOp, lhs: ValueRef, rhs: ValueRef, @@ -932,19 +994,11 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } }; - let result = match result { - Ok(v) => v, - Err(ConstEvalFailure::Compiletime(_)) => { - // We've errored, so we don't have to produce working code. - let llty = type_of::type_of(bcx.ccx(), ty); - Const::new(C_undef(llty), ty) - } - Err(ConstEvalFailure::Runtime(err)) => { - span_bug!(constant.span, - "MIR constant {:?} results in runtime panic: {:?}", - constant, err.description()) - } - }; + let result = result.unwrap_or_else(|_| { + // We've errored, so we don't have to produce working code. + let llty = type_of::type_of(bcx.ccx(), ty); + Const::new(C_undef(llty), ty) + }); debug!("trans_constant({:?}) = {:?}", constant, result); result @@ -953,7 +1007,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { pub fn trans_static_initializer(ccx: &CrateContext, def_id: DefId) - -> Result { + -> Result { let instance = Instance::mono(ccx.shared(), def_id); MirConstContext::trans_def(ccx, instance, IndexVec::new()).map(|c| c.llval) } diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 94db2e3c23..0ce5544c3b 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -109,7 +109,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::Lvalue::ReturnPointer => bug!(), // handled above mir::Lvalue::Static(def_id) => { let const_ty = self.monomorphized_lvalue_ty(lvalue); - LvalueRef::new_sized(consts::get_static(ccx, def_id).val, + LvalueRef::new_sized(consts::get_static(ccx, def_id), LvalueTy::from_ty(const_ty)) }, mir::Lvalue::Projection(box mir::Projection { @@ -152,7 +152,6 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::ProjectionElem::Deref => bug!(), mir::ProjectionElem::Field(ref field, _) => { let base_ty = tr_base.ty.to_ty(tcx); - let base_repr = adt::represent_type(ccx, base_ty); let discr = match tr_base.ty { LvalueTy::Ty { .. 
} => 0, LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v, @@ -164,7 +163,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } else { adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra) }; - let llprojected = adt::trans_field_ptr_builder(bcx, &base_repr, base, + let llprojected = adt::trans_field_ptr_builder(bcx, base_ty, base, Disr(discr), field.index()); let llextra = if is_sized { ptr::null_mut() diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 3b2059cd7e..37ce31a3c4 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -10,18 +10,17 @@ use libc::c_uint; use llvm::{self, ValueRef}; -use llvm::debuginfo::DIScope; use rustc::ty; use rustc::mir::repr as mir; use rustc::mir::tcx::LvalueTy; use session::config::FullDebugInfo; use base; use common::{self, Block, BlockAndBuilder, CrateContext, FunctionContext, C_null}; -use debuginfo::{self, declare_local, DebugLoc, VariableAccess, VariableKind}; +use debuginfo::{self, declare_local, DebugLoc, VariableAccess, VariableKind, FunctionDebugContext}; use machine; use type_of; -use syntax_pos::DUMMY_SP; +use syntax_pos::{DUMMY_SP, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos}; use std::ops::Deref; use std::rc::Rc; @@ -102,12 +101,67 @@ pub struct MirContext<'bcx, 'tcx:'bcx> { locals: IndexVec>, /// Debug information for MIR scopes. - scopes: IndexVec + scopes: IndexVec, } impl<'blk, 'tcx> MirContext<'blk, 'tcx> { - pub fn debug_loc(&self, source_info: mir::SourceInfo) -> DebugLoc { - DebugLoc::ScopeAt(self.scopes[source_info.scope], source_info.span) + pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> DebugLoc { + // Bail out if debug info emission is not enabled. + match self.fcx.debug_context { + FunctionDebugContext::DebugInfoDisabled | + FunctionDebugContext::FunctionWithoutDebugInfo => { + // Can't return DebugLoc::None here because intrinsic::trans_intrinsic_call() + // relies on debug location to obtain span of the call site. + return DebugLoc::ScopeAt(self.scopes[source_info.scope].scope_metadata, + source_info.span); + } + FunctionDebugContext::RegularContext(_) =>{} + } + + // In order to have a good line stepping behavior in debugger, we overwrite debug + // locations of macro expansions with that of the outermost expansion site + // (unless the crate is being compiled with `-Z debug-macros`). + if source_info.span.expn_id == NO_EXPANSION || + source_info.span.expn_id == COMMAND_LINE_EXPN || + self.fcx.ccx.sess().opts.debugging_opts.debug_macros { + + let scope_metadata = self.scope_metadata_for_loc(source_info.scope, + source_info.span.lo); + DebugLoc::ScopeAt(scope_metadata, source_info.span) + } else { + let cm = self.fcx.ccx.sess().codemap(); + // Walk up the macro expansion chain until we reach a non-expanded span. + let mut span = source_info.span; + while span.expn_id != NO_EXPANSION && span.expn_id != COMMAND_LINE_EXPN { + if let Some(callsite_span) = cm.with_expn_info(span.expn_id, + |ei| ei.map(|ei| ei.call_site.clone())) { + span = callsite_span; + } else { + break; + } + } + let scope_metadata = self.scope_metadata_for_loc(source_info.scope, span.lo); + // Use span of the outermost call site, while keeping the original lexical scope + DebugLoc::ScopeAt(scope_metadata, span) + } + } + + // DILocations inherit source file name from the parent DIScope. Due to macro expansions + // it may so happen that the current span belongs to a different file than the DIScope + // corresponding to span's containing visibility scope. 
If so, we need to create a DIScope + // "extension" into that file. + fn scope_metadata_for_loc(&self, scope_id: mir::VisibilityScope, pos: BytePos) + -> llvm::debuginfo::DIScope { + let scope_metadata = self.scopes[scope_id].scope_metadata; + if pos < self.scopes[scope_id].file_start_pos || + pos >= self.scopes[scope_id].file_end_pos { + let cm = self.fcx.ccx.sess().codemap(); + debuginfo::extend_scope_to_file(self.fcx.ccx, + scope_metadata, + &cm.lookup_char_pos(pos).file) + } else { + scope_metadata + } } } @@ -144,7 +198,7 @@ impl<'tcx> LocalRef<'tcx> { /////////////////////////////////////////////////////////////////////////// pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { - let bcx = fcx.init(false, None).build(); + let bcx = fcx.init(true).build(); let mir = bcx.mir(); // Analyze the temps to determine which must be lvalues @@ -154,16 +208,38 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { analyze::cleanup_kinds(bcx, &mir)) }); + // Allocate a `Block` for every basic block + let block_bcxs: IndexVec> = + mir.basic_blocks().indices().map(|bb| { + if bb == mir::START_BLOCK { + fcx.new_block("start") + } else { + fcx.new_block(&format!("{:?}", bb)) + } + }).collect(); + // Compute debuginfo scopes from MIR scopes. let scopes = debuginfo::create_mir_scopes(fcx); + let mut mircx = MirContext { + mir: mir.clone(), + fcx: fcx, + llpersonalityslot: None, + blocks: block_bcxs, + unreachable_block: None, + cleanup_kinds: cleanup_kinds, + landing_pads: IndexVec::from_elem(None, mir.basic_blocks()), + scopes: scopes, + locals: IndexVec::new(), + }; + // Allocate variable and temp allocas - let locals = { - let args = arg_local_refs(&bcx, &mir, &scopes, &lvalue_locals); + mircx.locals = { + let args = arg_local_refs(&bcx, &mir, &mircx.scopes, &lvalue_locals); let vars = mir.var_decls.iter().enumerate().map(|(i, decl)| { let ty = bcx.monomorphize(&decl.ty); - let scope = scopes[decl.source_info.scope]; - let dbg = !scope.is_null() && bcx.sess().opts.debuginfo == FullDebugInfo; + let debug_scope = mircx.scopes[decl.source_info.scope]; + let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo; let local = mir.local_index(&mir::Lvalue::Var(mir::Var::new(i))).unwrap(); if !lvalue_locals.contains(local.index()) && !dbg { @@ -172,11 +248,16 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { let lvalue = LvalueRef::alloca(&bcx, ty, &decl.name.as_str()); if dbg { - bcx.with_block(|bcx| { - declare_local(bcx, decl.name, ty, scope, - VariableAccess::DirectVariable { alloca: lvalue.llval }, - VariableKind::LocalVariable, decl.source_info.span); - }); + let dbg_loc = mircx.debug_loc(decl.source_info); + if let DebugLoc::ScopeAt(scope, span) = dbg_loc { + bcx.with_block(|bcx| { + declare_local(bcx, decl.name, ty, scope, + VariableAccess::DirectVariable { alloca: lvalue.llval }, + VariableKind::LocalVariable, span); + }); + } else { + panic!("Unexpected"); + } } LocalRef::Lvalue(lvalue) }); @@ -202,18 +283,8 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { })).collect() }; - // Allocate a `Block` for every basic block - let block_bcxs: IndexVec> = - mir.basic_blocks().indices().map(|bb| { - if bb == mir::START_BLOCK { - fcx.new_block("start", None) - } else { - fcx.new_block(&format!("{:?}", bb), None) - } - }).collect(); - // Branch to the START block - let start_bcx = block_bcxs[mir::START_BLOCK]; + let start_bcx = mircx.blocks[mir::START_BLOCK]; 
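The debug_loc logic added above walks a macro-expanded span up its chain of expansion call sites until it reaches a span that was not produced by an expansion, so that single-stepping in a debugger lands on the outermost macro invocation rather than inside macro-generated code (unless the crate is built with -Z debug-macros). Below is a minimal, self-contained sketch of that walk; ExpnId, Span, ExpnInfo and the expansions map are stand-ins invented for illustration, not the compiler's real types.

    use std::collections::HashMap;

    // Hypothetical stand-ins for the compiler's span/expansion bookkeeping.
    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct ExpnId(u32);
    const NO_EXPANSION: ExpnId = ExpnId(0);

    #[derive(Clone, Copy, Debug)]
    struct Span { lo: u32, hi: u32, expn_id: ExpnId }

    struct ExpnInfo { call_site: Span }

    /// Walk from a (possibly macro-expanded) span to its outermost call site.
    fn outermost_call_site(mut span: Span, expansions: &HashMap<ExpnId, ExpnInfo>) -> Span {
        while span.expn_id != NO_EXPANSION {
            match expansions.get(&span.expn_id) {
                Some(info) => span = info.call_site,
                None => break, // unknown expansion: keep the span we already have
            }
        }
        span
    }

    fn main() {
        let mut expansions = HashMap::new();
        // A span produced by expansion 2, whose call site came from expansion 1.
        expansions.insert(ExpnId(1), ExpnInfo { call_site: Span { lo: 10, hi: 20, expn_id: NO_EXPANSION } });
        expansions.insert(ExpnId(2), ExpnInfo { call_site: Span { lo: 30, hi: 40, expn_id: ExpnId(1) } });
        let inner = Span { lo: 100, hi: 110, expn_id: ExpnId(2) };
        assert_eq!(outermost_call_site(inner, &expansions).lo, 10);
    }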
bcx.br(start_bcx.llbb); // Up until here, IR instructions for this function have explicitly not been annotated with @@ -221,18 +292,6 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { // emitting should be enabled. debuginfo::start_emitting_source_locations(fcx); - let mut mircx = MirContext { - mir: mir.clone(), - fcx: fcx, - llpersonalityslot: None, - blocks: block_bcxs, - unreachable_block: None, - cleanup_kinds: cleanup_kinds, - landing_pads: IndexVec::from_elem(None, mir.basic_blocks()), - locals: locals, - scopes: scopes - }; - let mut visited = BitVector::new(mir.basic_blocks().len()); let mut rpo = traversal::reverse_postorder(&mir); @@ -270,7 +329,7 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { /// indirect. fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, mir: &mir::Mir<'tcx>, - scopes: &IndexVec, + scopes: &IndexVec, lvalue_locals: &BitVector) -> Vec> { let fcx = bcx.fcx(); @@ -280,8 +339,8 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, // Get the argument scope, if it exists and if we need it. let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE]; - let arg_scope = if !arg_scope.is_null() && bcx.sess().opts.debuginfo == FullDebugInfo { - Some(arg_scope) + let arg_scope = if arg_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo { + Some(arg_scope.scope_metadata) } else { None }; diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 9f7c2ee219..b643dcd987 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -17,7 +17,6 @@ use asm; use base; use callee::Callee; use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result}; -use datum::{Datum, Lvalue}; use debuginfo::DebugLoc; use adt; use machine; @@ -101,7 +100,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let size = C_uint(bcx.ccx(), size); let base = get_dataptr(&bcx, dest.llval); let bcx = bcx.map_block(|block| { - tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| { + tvec::slice_for_each(block, base, tr_elem.ty, size, |block, llslot| { self.store_operand_direct(block, llslot, tr_elem); block }) @@ -111,19 +110,21 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::Rvalue::Aggregate(ref kind, ref operands) => { match *kind { - mir::AggregateKind::Adt(adt_def, index, _) => { - let repr = adt::represent_type(bcx.ccx(), dest.ty.to_ty(bcx.tcx())); - let disr = Disr::from(adt_def.variants[index].disr_val); + mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { + let disr = Disr::from(adt_def.variants[variant_index].disr_val); bcx.with_block(|bcx| { - adt::trans_set_discr(bcx, &repr, dest.llval, Disr::from(disr)); + adt::trans_set_discr(bcx, + dest.ty.to_ty(bcx.tcx()), dest.llval, Disr::from(disr)); }); for (i, operand) in operands.iter().enumerate() { let op = self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. 
if !common::type_is_zero_size(bcx.ccx(), op.ty) { let val = adt::MaybeSizedValue::sized(dest.llval); - let lldest_i = adt::trans_field_ptr_builder(&bcx, &repr, - val, disr, i); + let field_index = active_field_index.unwrap_or(i); + let lldest_i = adt::trans_field_ptr_builder(&bcx, + dest.ty.to_ty(bcx.tcx()), + val, disr, field_index); self.store_operand(&bcx, lldest_i, op); } } @@ -157,8 +158,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => { let outputs = outputs.iter().map(|output| { let lvalue = self.trans_lvalue(&bcx, output); - Datum::new(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()), - Lvalue::new("out")) + (lvalue.llval, lvalue.ty.to_ty(bcx.tcx())) }).collect(); let input_vals = inputs.iter().map(|input| { @@ -202,7 +202,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { ty::TyFnDef(def_id, substs, _) => { OperandValue::Immediate( Callee::def(bcx.ccx(), def_id, substs) - .reify(bcx.ccx()).val) + .reify(bcx.ccx())) } _ => { bug!("{} cannot be reified to a fn ptr", operand.ty) @@ -271,17 +271,17 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty); let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty); let (llval, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in { - let repr = adt::represent_type(bcx.ccx(), operand.ty); + let l = bcx.ccx().layout_of(operand.ty); let discr = match operand.val { OperandValue::Immediate(llval) => llval, OperandValue::Ref(llptr) => { bcx.with_block(|bcx| { - adt::trans_get_discr(bcx, &repr, llptr, None, true) + adt::trans_get_discr(bcx, operand.ty, llptr, None, true) }) } OperandValue::Pair(..) => bug!("Unexpected Pair operand") }; - (discr, adt::is_discr_signed(&repr)) + (discr, adt::is_discr_signed(&l)) } else { (operand.immediate(), operand.ty.is_signed()) }; diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 1167208955..9943acbc88 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -62,11 +62,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => { let ty = self.monomorphized_lvalue_ty(lvalue); - let repr = adt::represent_type(bcx.ccx(), ty); let lvalue_transed = self.trans_lvalue(&bcx, lvalue); bcx.with_block(|bcx| adt::trans_set_discr(bcx, - &repr, + ty, lvalue_transed.llval, Disr::from(variant_index)) ); @@ -78,6 +77,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::StatementKind::StorageDead(ref lvalue) => { self.trans_storage_liveness(bcx, lvalue, base::Lifetime::End) } + mir::StatementKind::Nop => bcx, } } diff --git a/src/librustc_trans/monomorphize.rs b/src/librustc_trans/monomorphize.rs index e9aacaa0f9..270ce79620 100644 --- a/src/librustc_trans/monomorphize.rs +++ b/src/librustc_trans/monomorphize.rs @@ -8,163 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
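Both enum-cast paths touched above (the constant path earlier and the rvalue path here) now consult adt::is_discr_signed on the type's layout before widening a C-like enum's discriminant, because signed and unsigned storage must be extended differently. A plain-integer illustration of why that flag matters, independent of any compiler internals:

    // Widening an 8-bit discriminant to i32: the result depends on whether the
    // stored byte is interpreted as signed (sign-extend) or unsigned (zero-extend).
    fn widen_discriminant(raw: u8, signed: bool) -> i32 {
        if signed {
            raw as i8 as i32 // e.g. 0xFF is reinterpreted as -1, then sign-extended
        } else {
            raw as i32       // e.g. 0xFF stays 255
        }
    }

    fn main() {
        assert_eq!(widen_discriminant(0xFF, true), -1);
        assert_eq!(widen_discriminant(0xFF, false), 255);
    }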
-use llvm::ValueRef; -use llvm; +use common::*; use rustc::hir::def_id::DefId; use rustc::infer::TransNormalize; -use rustc::ty::subst; +use rustc::ty::fold::{TypeFolder, TypeFoldable}; use rustc::ty::subst::{Subst, Substs}; -use rustc::ty::{self, Ty, TypeFoldable, TyCtxt}; -use attributes; -use base::{push_ctxt}; -use base; -use common::*; -use declare; -use Disr; -use rustc::hir::map as hir_map; +use rustc::ty::{self, Ty, TyCtxt}; use rustc::util::ppaux; - -use rustc::hir; - -use errors; - +use rustc::util::common::MemoizationMap; use std::fmt; -use trans_item::TransItem; - -pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - fn_id: DefId, - psubsts: &'tcx subst::Substs<'tcx>) - -> (ValueRef, Ty<'tcx>) { - debug!("monomorphic_fn(fn_id={:?}, real_substs={:?})", fn_id, psubsts); - assert!(!psubsts.types.needs_infer() && !psubsts.types.has_param_types()); - - let _icx = push_ctxt("monomorphic_fn"); - - let instance = Instance::new(fn_id, psubsts); - - let item_ty = ccx.tcx().lookup_item_type(fn_id).ty; - - debug!("monomorphic_fn about to subst into {:?}", item_ty); - let mono_ty = apply_param_substs(ccx.tcx(), psubsts, &item_ty); - debug!("mono_ty = {:?} (post-substitution)", mono_ty); - - if let Some(&val) = ccx.instances().borrow().get(&instance) { - debug!("leaving monomorphic fn {:?}", instance); - return (val, mono_ty); - } else { - assert!(!ccx.codegen_unit().contains_item(&TransItem::Fn(instance))); - } - - debug!("monomorphic_fn({:?})", instance); - - ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1); - - let depth; - { - let mut monomorphizing = ccx.monomorphizing().borrow_mut(); - depth = match monomorphizing.get(&fn_id) { - Some(&d) => d, None => 0 - }; - - debug!("monomorphic_fn: depth for fn_id={:?} is {:?}", fn_id, depth+1); - - // Random cut-off -- code that needs to instantiate the same function - // recursively more than thirty times can probably safely be assumed - // to be causing an infinite expansion. - if depth > ccx.sess().recursion_limit.get() { - let error = format!("reached the recursion limit while instantiating `{}`", - instance); - if let Some(id) = ccx.tcx().map.as_local_node_id(fn_id) { - ccx.sess().span_fatal(ccx.tcx().map.span(id), &error); - } else { - ccx.sess().fatal(&error); - } - } - - monomorphizing.insert(fn_id, depth + 1); - } - - let symbol = ccx.symbol_map().get_or_compute(ccx.shared(), - TransItem::Fn(instance)); - - debug!("monomorphize_fn mangled to {}", &symbol); - assert!(declare::get_defined_value(ccx, &symbol).is_none()); - - // FIXME(nagisa): perhaps needs a more fine grained selection? - let lldecl = declare::define_internal_fn(ccx, &symbol, mono_ty); - // FIXME(eddyb) Doubt all extern fn should allow unwinding. - attributes::unwind(lldecl, true); - - ccx.instances().borrow_mut().insert(instance, lldecl); - - // we can only monomorphize things in this crate (or inlined into it) - let fn_node_id = ccx.tcx().map.as_local_node_id(fn_id).unwrap(); - let map_node = errors::expect( - ccx.sess().diagnostic(), - ccx.tcx().map.find(fn_node_id), - || { - format!("while instantiating `{}`, couldn't find it in \ - the item map (may have attempted to monomorphize \ - an item defined in a different crate?)", - instance) - }); - match map_node { - hir_map::NodeItem(&hir::Item { - ref attrs, - node: hir::ItemFn(..), .. - }) | - hir_map::NodeImplItem(&hir::ImplItem { - ref attrs, node: hir::ImplItemKind::Method( - hir::MethodSig { .. }, _), .. 
- }) | - hir_map::NodeTraitItem(&hir::TraitItem { - ref attrs, node: hir::MethodTraitItem( - hir::MethodSig { .. }, Some(_)), .. - }) => { - let trans_item = TransItem::Fn(instance); - - if ccx.shared().translation_items().borrow().contains(&trans_item) { - attributes::from_fn_attrs(ccx, attrs, lldecl); - unsafe { - llvm::LLVMSetLinkage(lldecl, llvm::ExternalLinkage); - } - } else { - // FIXME: #34151 - // Normally, getting here would indicate a bug in trans::collector, - // since it seems to have missed a translation item. When we are - // translating with non-MIR based trans, however, the results of - // the collector are not entirely reliable since it bases its - // analysis on MIR. Thus, we'll instantiate the missing function - // privately in this codegen unit, so that things keep working. - ccx.stats().n_fallback_instantiations.set(ccx.stats() - .n_fallback_instantiations - .get() + 1); - trans_item.predefine(ccx, llvm::InternalLinkage); - trans_item.define(ccx); - } - } - - hir_map::NodeVariant(_) | hir_map::NodeStructCtor(_) => { - let disr = match map_node { - hir_map::NodeVariant(_) => { - Disr::from(inlined_variant_def(ccx, fn_node_id).disr_val) - } - hir_map::NodeStructCtor(_) => Disr(0), - _ => bug!() - }; - attributes::inline(lldecl, attributes::InlineAttr::Hint); - attributes::set_frame_pointer_elimination(ccx, lldecl); - base::trans_ctor_shim(ccx, fn_node_id, disr, psubsts, lldecl); - } - - _ => bug!("can't monomorphize a {:?}", map_node) - }; - - ccx.monomorphizing().borrow_mut().insert(fn_id, depth); - - debug!("leaving monomorphic fn {}", ccx.tcx().item_path_str(fn_id)); - (lldecl, mono_ty) -} #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub struct Instance<'tcx> { @@ -174,14 +26,14 @@ pub struct Instance<'tcx> { impl<'tcx> fmt::Display for Instance<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - ppaux::parameterized(f, &self.substs, self.def, ppaux::Ns::Value, &[], |_| None) + ppaux::parameterized(f, &self.substs, self.def, &[]) } } impl<'tcx> Instance<'tcx> { pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>) -> Instance<'tcx> { - assert!(substs.regions.iter().all(|&r| r == ty::ReErased)); + assert!(substs.regions().all(|&r| r == ty::ReErased)); Instance { def: def_id, substs: substs } } pub fn mono<'a>(scx: &SharedCrateContext<'a, 'tcx>, def_id: DefId) -> Instance<'tcx> { @@ -191,14 +43,17 @@ impl<'tcx> Instance<'tcx> { /// Monomorphizes a type from the AST by first applying the in-scope /// substitutions and then normalizing any associated types. 
-pub fn apply_param_substs<'a, 'tcx, T>(tcx: TyCtxt<'a, 'tcx, 'tcx>, +pub fn apply_param_substs<'a, 'tcx, T>(scx: &SharedCrateContext<'a, 'tcx>, param_substs: &Substs<'tcx>, value: &T) -> T where T: TransNormalize<'tcx> { + let tcx = scx.tcx(); + debug!("apply_param_substs(param_substs={:?}, value={:?})", param_substs, value); let substituted = value.subst(tcx, param_substs); - tcx.normalize_associated_type(&substituted) + let substituted = scx.tcx().erase_regions(&substituted); + AssociatedTypeNormalizer::new(scx).fold(&substituted) } @@ -210,3 +65,40 @@ pub fn field_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, { tcx.normalize_associated_type(&f.ty(tcx, param_substs)) } + +struct AssociatedTypeNormalizer<'a, 'b: 'a, 'gcx: 'b> { + shared: &'a SharedCrateContext<'b, 'gcx>, +} + +impl<'a, 'b, 'gcx> AssociatedTypeNormalizer<'a, 'b, 'gcx> { + fn new(shared: &'a SharedCrateContext<'b, 'gcx>) -> Self { + AssociatedTypeNormalizer { + shared: shared, + } + } + + fn fold>(&mut self, value: &T) -> T { + if !value.has_projection_types() { + value.clone() + } else { + value.fold_with(self) + } + } +} + +impl<'a, 'b, 'gcx> TypeFolder<'gcx, 'gcx> for AssociatedTypeNormalizer<'a, 'b, 'gcx> { + fn tcx<'c>(&'c self) -> TyCtxt<'c, 'gcx, 'gcx> { + self.shared.tcx() + } + + fn fold_ty(&mut self, ty: Ty<'gcx>) -> Ty<'gcx> { + if !ty.has_projection_types() { + ty + } else { + self.shared.project_cache().memoize(ty, || { + debug!("AssociatedTypeNormalizer: ty={:?}", ty); + self.shared.tcx().normalize_associated_type(&ty) + }) + } + } +} diff --git a/src/librustc_trans/partitioning.rs b/src/librustc_trans/partitioning.rs index ade6e8abeb..625b43c7d1 100644 --- a/src/librustc_trans/partitioning.rs +++ b/src/librustc_trans/partitioning.rs @@ -117,6 +117,7 @@ //! inlining, even when they are not marked #[inline]. use collector::InliningMap; +use context::SharedCrateContext; use llvm; use monomorphize; use rustc::dep_graph::{DepNode, WorkProductId}; @@ -126,13 +127,14 @@ use rustc::session::config::NUMBERED_CODEGEN_UNIT_MARKER; use rustc::ty::TyCtxt; use rustc::ty::item_path::characteristic_def_id_of_type; use std::cmp::Ordering; -use std::hash::{Hash, Hasher, SipHasher}; +use std::hash::{Hash, Hasher}; use std::sync::Arc; +use std::collections::hash_map::DefaultHasher; use symbol_map::SymbolMap; use syntax::ast::NodeId; use syntax::parse::token::{self, InternedString}; use trans_item::TransItem; -use util::nodemap::{FnvHashMap, FnvHashSet, NodeSet}; +use util::nodemap::{FnvHashMap, FnvHashSet}; pub enum PartitioningStrategy { /// Generate one codegen unit per source-level module. @@ -187,7 +189,7 @@ impl<'tcx> CodegenUnit<'tcx> { } pub fn compute_symbol_name_hash(&self, tcx: TyCtxt, symbol_map: &SymbolMap) -> u64 { - let mut state = SipHasher::new(); + let mut state = DefaultHasher::new(); let all_items = self.items_in_deterministic_order(tcx, symbol_map); for (item, _) in all_items { let symbol_name = symbol_map.get(item).unwrap(); @@ -250,35 +252,29 @@ impl<'tcx> CodegenUnit<'tcx> { // Anything we can't find a proper codegen unit for goes into this. 
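compute_symbol_name_hash above now feeds the codegen unit's symbol names into std::collections::hash_map::DefaultHasher instead of the deprecated SipHasher. The pattern, reduced to a standalone program (the symbol names below are made up for illustration):

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Hash an ordered list of symbol names down to a single u64, the same way the
    // codegen-unit hash folds every exported symbol name into one value.
    fn hash_symbol_names(names: &[&str]) -> u64 {
        let mut state = DefaultHasher::new();
        for name in names {
            name.hash(&mut state);
        }
        state.finish()
    }

    fn main() {
        let h = hash_symbol_names(&["_ZN3foo3barE", "_ZN3foo3bazE"]);
        println!("codegen unit hash: {:x}", h);
    }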
const FALLBACK_CODEGEN_UNIT: &'static str = "__rustc_fallback_codegen_unit"; -pub fn partition<'a, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, +pub fn partition<'a, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>, trans_items: I, strategy: PartitioningStrategy, - inlining_map: &InliningMap<'tcx>, - reachable: &NodeSet) + inlining_map: &InliningMap<'tcx>) -> Vec> where I: Iterator> { - if let PartitioningStrategy::FixedUnitCount(1) = strategy { - // If there is only a single codegen-unit, we can use a very simple - // scheme and don't have to bother with doing much analysis. - return vec![single_codegen_unit(tcx, trans_items, reachable)]; - } + let tcx = scx.tcx(); // In the first step, we place all regular translation items into their // respective 'home' codegen unit. Regular translation items are all // functions and statics defined in the local crate. - let mut initial_partitioning = place_root_translation_items(tcx, - trans_items, - reachable); + let mut initial_partitioning = place_root_translation_items(scx, + trans_items); - debug_dump(tcx, "INITIAL PARTITONING:", initial_partitioning.codegen_units.iter()); + debug_dump(scx, "INITIAL PARTITONING:", initial_partitioning.codegen_units.iter()); // If the partitioning should produce a fixed count of codegen units, merge // until that count is reached. if let PartitioningStrategy::FixedUnitCount(count) = strategy { merge_codegen_units(&mut initial_partitioning, count, &tcx.crate_name[..]); - debug_dump(tcx, "POST MERGING:", initial_partitioning.codegen_units.iter()); + debug_dump(scx, "POST MERGING:", initial_partitioning.codegen_units.iter()); } // In the next step, we use the inlining map to determine which addtional @@ -288,7 +284,7 @@ pub fn partition<'a, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let post_inlining = place_inlined_translation_items(initial_partitioning, inlining_map); - debug_dump(tcx, "POST INLINING:", post_inlining.0.iter()); + debug_dump(scx, "POST INLINING:", post_inlining.0.iter()); // Finally, sort by codegen unit name, so that we get deterministic results let mut result = post_inlining.0; @@ -306,20 +302,20 @@ struct PreInliningPartitioning<'tcx> { struct PostInliningPartitioning<'tcx>(Vec>); -fn place_root_translation_items<'a, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - trans_items: I, - _reachable: &NodeSet) +fn place_root_translation_items<'a, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>, + trans_items: I) -> PreInliningPartitioning<'tcx> where I: Iterator> { + let tcx = scx.tcx(); let mut roots = FnvHashSet(); let mut codegen_units = FnvHashMap(); for trans_item in trans_items { - let is_root = !trans_item.is_instantiated_only_on_demand(); + let is_root = !trans_item.is_instantiated_only_on_demand(tcx); if is_root { - let characteristic_def_id = characteristic_def_id_of_trans_item(tcx, trans_item); + let characteristic_def_id = characteristic_def_id_of_trans_item(scx, trans_item); let is_volatile = trans_item.is_generic_fn(); let codegen_unit_name = match characteristic_def_id { @@ -342,10 +338,14 @@ fn place_root_translation_items<'a, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, TransItem::DropGlue(..) => unreachable!(), // Is there any benefit to using ExternalLinkage?: TransItem::Fn(ref instance) => { - if instance.substs.types.is_empty() { + if instance.substs.types().next().is_none() { // This is a non-generic functions, we always // make it visible externally on the chance that // it might be used in another codegen unit. 
+ // Later on base::internalize_symbols() will + // assign "internal" linkage to those symbols + // that are not referenced from other codegen + // units (and are not publicly visible). llvm::ExternalLinkage } else { // In the current setup, generic functions cannot @@ -450,7 +450,6 @@ fn place_inlined_translation_items<'tcx>(initial_partitioning: PreInliningPartit // reliably in that case. new_codegen_unit.items.insert(trans_item, llvm::InternalLinkage); } else { - assert!(trans_item.is_instantiated_only_on_demand()); // We can't be sure if this will also be instantiated // somewhere else, so we add an instance here with // InternalLinkage so we don't get any conflicts. @@ -477,16 +476,18 @@ fn place_inlined_translation_items<'tcx>(initial_partitioning: PreInliningPartit } } -fn characteristic_def_id_of_trans_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, +fn characteristic_def_id_of_trans_item<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, trans_item: TransItem<'tcx>) -> Option { + let tcx = scx.tcx(); match trans_item { TransItem::Fn(instance) => { // If this is a method, we want to put it into the same module as // its self-type. If the self-type does not provide a characteristic // DefId, we use the location of the impl after all. - if let Some(self_ty) = instance.substs.self_ty() { + if tcx.trait_of_item(instance.def).is_some() { + let self_ty = instance.substs.type_at(0); // This is an implementation of a trait method. return characteristic_def_id_of_type(self_ty).or(Some(instance.def)); } @@ -496,7 +497,7 @@ fn characteristic_def_id_of_trans_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // self-type is: let impl_self_ty = tcx.lookup_item_type(impl_def_id).ty; let impl_self_ty = tcx.erase_regions(&impl_self_ty); - let impl_self_ty = monomorphize::apply_param_substs(tcx, + let impl_self_ty = monomorphize::apply_param_substs(scx, instance.substs, &impl_self_ty); @@ -544,68 +545,6 @@ fn compute_codegen_unit_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, return token::intern_and_get_ident(&mod_path[..]); } -fn single_codegen_unit<'a, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - trans_items: I, - reachable: &NodeSet) - -> CodegenUnit<'tcx> - where I: Iterator> -{ - let mut items = FnvHashMap(); - - for trans_item in trans_items { - let linkage = trans_item.explicit_linkage(tcx).unwrap_or_else(|| { - match trans_item { - TransItem::Static(node_id) => { - if reachable.contains(&node_id) { - llvm::ExternalLinkage - } else { - llvm::PrivateLinkage - } - } - TransItem::DropGlue(_) => { - llvm::InternalLinkage - } - TransItem::Fn(instance) => { - if trans_item.is_generic_fn() { - // FIXME(mw): Assigning internal linkage to all - // monomorphizations is potentially a waste of space - // since monomorphizations could be shared between - // crates. The main reason for making them internal is - // a limitation in MingW's binutils that cannot deal - // with COFF object that have more than 2^15 sections, - // which is something that can happen for large programs - // when every function gets put into its own COMDAT - // section. - llvm::InternalLinkage - } else if trans_item.is_from_extern_crate() { - // FIXME(mw): It would be nice if we could mark these as - // `AvailableExternallyLinkage`, since they should have - // been instantiated in the extern crate. But this - // sometimes leads to crashes on Windows because LLVM - // does not handle exception handling table instantiation - // reliably in that case. 
- llvm::InternalLinkage - } else if reachable.contains(&tcx.map - .as_local_node_id(instance.def) - .unwrap()) { - llvm::ExternalLinkage - } else { - // Functions that are not visible outside this crate can - // be marked as internal. - llvm::InternalLinkage - } - } - } - }); - - items.insert(trans_item, linkage); - } - - CodegenUnit::new( - numbered_codegen_unit_name(&tcx.crate_name[..], 0), - items) -} - fn numbered_codegen_unit_name(crate_name: &str, index: usize) -> InternedString { token::intern_and_get_ident(&format!("{}{}{}", crate_name, @@ -613,7 +552,7 @@ fn numbered_codegen_unit_name(crate_name: &str, index: usize) -> InternedString index)[..]) } -fn debug_dump<'a, 'b, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, +fn debug_dump<'a, 'b, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>, label: &str, cgus: I) where I: Iterator>, @@ -622,10 +561,21 @@ fn debug_dump<'a, 'b, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if cfg!(debug_assertions) { debug!("{}", label); for cgu in cgus { + let symbol_map = SymbolMap::build(scx, cgu.items + .iter() + .map(|(&trans_item, _)| trans_item)); debug!("CodegenUnit {}:", cgu.name); for (trans_item, linkage) in &cgu.items { - debug!(" - {} [{:?}]", trans_item.to_string(tcx), linkage); + let symbol_name = symbol_map.get_or_compute(scx, *trans_item); + let symbol_hash_start = symbol_name.rfind('h'); + let symbol_hash = symbol_hash_start.map(|i| &symbol_name[i ..]) + .unwrap_or(""); + + debug!(" - {} [{:?}] [{}]", + trans_item.to_string(scx.tcx()), + linkage, + symbol_hash); } debug!(""); diff --git a/src/librustc_trans/symbol_names_test.rs b/src/librustc_trans/symbol_names_test.rs index 9a7fe54e0d..25c30151ad 100644 --- a/src/librustc_trans/symbol_names_test.rs +++ b/src/librustc_trans/symbol_names_test.rs @@ -17,7 +17,6 @@ use rustc::hir; use rustc::hir::intravisit::{self, Visitor}; use syntax::ast; -use syntax::attr::AttrMetaMethods; use common::SharedCrateContext; use monomorphize::Instance; diff --git a/src/librustc_trans/trans_item.rs b/src/librustc_trans/trans_item.rs index 90058f0b83..131df50217 100644 --- a/src/librustc_trans/trans_item.rs +++ b/src/librustc_trans/trans_item.rs @@ -22,17 +22,15 @@ use declare; use glue::DropGlueKind; use llvm; use monomorphize::{self, Instance}; -use inline; use rustc::dep_graph::DepNode; use rustc::hir; -use rustc::hir::map as hir_map; use rustc::hir::def_id::DefId; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; -use rustc::ty::subst; +use rustc::ty::subst::Substs; use rustc_const_eval::fatal_const_eval_err; use std::hash::{Hash, Hasher}; use syntax::ast::{self, NodeId}; -use syntax::{attr,errors}; +use syntax::attr; use type_of; use glue; use abi::{Abi, FnType}; @@ -88,13 +86,13 @@ impl<'a, 'tcx> TransItem<'tcx> { let def_id = ccx.tcx().map.local_def_id(node_id); let _task = ccx.tcx().dep_graph.in_task(DepNode::TransCrateItem(def_id)); // (*) let item = ccx.tcx().map.expect_item(node_id); - if let hir::ItemStatic(_, m, ref expr) = item.node { - match consts::trans_static(&ccx, m, expr, item.id, &item.attrs) { + if let hir::ItemStatic(_, m, _) = item.node { + match consts::trans_static(&ccx, m, item.id, &item.attrs) { Ok(_) => { /* Cool, everything's alright. */ }, Err(err) => { // FIXME: shouldn't this be a `span_err`? 
fatal_const_eval_err( - ccx.tcx(), &err, expr.span, "static"); + ccx.tcx(), &err, item.span, "static"); } }; } else { @@ -157,70 +155,41 @@ impl<'a, 'tcx> TransItem<'tcx> { let ty = ccx.tcx().lookup_item_type(def_id).ty; let llty = type_of::type_of(ccx, ty); - match ccx.tcx().map.get(node_id) { - hir::map::NodeItem(&hir::Item { - span, node: hir::ItemStatic(..), .. - }) => { - let g = declare::define_global(ccx, symbol_name, llty).unwrap_or_else(|| { - ccx.sess().span_fatal(span, - &format!("symbol `{}` is already defined", symbol_name)) - }); + let g = declare::define_global(ccx, symbol_name, llty).unwrap_or_else(|| { + ccx.sess().span_fatal(ccx.tcx().map.span(node_id), + &format!("symbol `{}` is already defined", symbol_name)) + }); - unsafe { llvm::LLVMSetLinkage(g, linkage) }; - } + unsafe { llvm::LLVMRustSetLinkage(g, linkage) }; - item => bug!("predefine_static: expected static, found {:?}", item) - } + let instance = Instance::mono(ccx.shared(), def_id); + ccx.instances().borrow_mut().insert(instance, g); + ccx.statics().borrow_mut().insert(g, def_id); } fn predefine_fn(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>, linkage: llvm::Linkage, symbol_name: &str) { - assert!(!instance.substs.types.needs_infer() && - !instance.substs.types.has_param_types()); - - let instance = inline::maybe_inline_instance(ccx, instance); + assert!(!instance.substs.needs_infer() && + !instance.substs.has_param_types()); let item_ty = ccx.tcx().lookup_item_type(instance.def).ty; let item_ty = ccx.tcx().erase_regions(&item_ty); - let mono_ty = monomorphize::apply_param_substs(ccx.tcx(), instance.substs, &item_ty); - - let fn_node_id = ccx.tcx().map.as_local_node_id(instance.def).unwrap(); - let map_node = errors::expect( - ccx.sess().diagnostic(), - ccx.tcx().map.find(fn_node_id), - || { - format!("while instantiating `{}`, couldn't find it in \ - the item map (may have attempted to monomorphize \ - an item defined in a different crate?)", - instance) - }); - - match map_node { - hir_map::NodeItem(&hir::Item { - ref attrs, node: hir::ItemFn(..), .. - }) | - hir_map::NodeTraitItem(&hir::TraitItem { - ref attrs, node: hir::MethodTraitItem(..), .. - }) | - hir_map::NodeImplItem(&hir::ImplItem { - ref attrs, node: hir::ImplItemKind::Method(..), .. 
- }) => { - let lldecl = declare::declare_fn(ccx, symbol_name, mono_ty); - unsafe { llvm::LLVMSetLinkage(lldecl, linkage) }; - base::set_link_section(ccx, lldecl, attrs); - if linkage == llvm::LinkOnceODRLinkage || - linkage == llvm::WeakODRLinkage { - llvm::SetUniqueComdat(ccx.llmod(), lldecl); - } + let mono_ty = monomorphize::apply_param_substs(ccx.shared(), instance.substs, &item_ty); + + let attrs = ccx.tcx().get_attrs(instance.def); + let lldecl = declare::declare_fn(ccx, symbol_name, mono_ty); + unsafe { llvm::LLVMRustSetLinkage(lldecl, linkage) }; + base::set_link_section(ccx, lldecl, &attrs); + if linkage == llvm::Linkage::LinkOnceODRLinkage || + linkage == llvm::Linkage::WeakODRLinkage { + llvm::SetUniqueComdat(ccx.llmod(), lldecl); + } - attributes::from_fn_attrs(ccx, attrs, lldecl); - ccx.instances().borrow_mut().insert(instance, lldecl); - } - _ => bug!("Invalid item for TransItem::Fn: `{:?}`", map_node) - }; + attributes::from_fn_attrs(ccx, &attrs, lldecl); + ccx.instances().borrow_mut().insert(instance, lldecl); } fn predefine_drop_glue(ccx: &CrateContext<'a, 'tcx>, @@ -245,9 +214,9 @@ impl<'a, 'tcx> TransItem<'tcx> { assert!(declare::get_defined_value(ccx, symbol_name).is_none()); let llfn = declare::declare_cfn(ccx, symbol_name, llfnty); - unsafe { llvm::LLVMSetLinkage(llfn, linkage) }; - if linkage == llvm::LinkOnceODRLinkage || - linkage == llvm::WeakODRLinkage { + unsafe { llvm::LLVMRustSetLinkage(llfn, linkage) }; + if linkage == llvm::Linkage::LinkOnceODRLinkage || + linkage == llvm::Linkage::WeakODRLinkage { llvm::SetUniqueComdat(ccx.llmod(), llfn); } attributes::set_frame_pointer_elimination(ccx, llfn); @@ -272,19 +241,6 @@ impl<'a, 'tcx> TransItem<'tcx> { } } - pub fn requests_inline(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> bool { - match *self { - TransItem::Fn(ref instance) => { - !instance.substs.types.is_empty() || { - let attributes = tcx.get_attrs(instance.def); - attr::requests_inline(&attributes[..]) - } - } - TransItem::DropGlue(..) => true, - TransItem::Static(..) => false, - } - } - pub fn is_from_extern_crate(&self) -> bool { match *self { TransItem::Fn(ref instance) => !instance.def.is_local(), @@ -293,10 +249,19 @@ impl<'a, 'tcx> TransItem<'tcx> { } } - pub fn is_instantiated_only_on_demand(&self) -> bool { + /// True if the translation item should only be translated to LLVM IR if + /// it is referenced somewhere (like inline functions, for example). + pub fn is_instantiated_only_on_demand(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> bool { + if self.explicit_linkage(tcx).is_some() { + return false; + } + match *self { - TransItem::Fn(ref instance) => !instance.def.is_local() || - !instance.substs.types.is_empty(), + TransItem::Fn(ref instance) => { + !instance.def.is_local() || + instance.substs.types().next().is_some() || + attr::requests_inline(&tcx.get_attrs(instance.def)[..]) + } TransItem::DropGlue(..) => true, TransItem::Static(..) => false, } @@ -304,12 +269,26 @@ impl<'a, 'tcx> TransItem<'tcx> { pub fn is_generic_fn(&self) -> bool { match *self { - TransItem::Fn(ref instance) => !instance.substs.types.is_empty(), + TransItem::Fn(ref instance) => { + instance.substs.types().next().is_some() + } TransItem::DropGlue(..) | TransItem::Static(..) 
=> false, } } + /// Returns true if there has to be a local copy of this TransItem in every + /// codegen unit that references it (as with inlined functions, for example) + pub fn needs_local_copy(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> bool { + // Currently everything that is instantiated only on demand is done so + // with "internal" linkage, so we need a copy to be present in every + // codegen unit. + // This is coincidental: We could also instantiate something only if it + // is referenced (e.g. a regular, private function) but place it in its + // own codegen unit with "external" linkage. + self.is_instantiated_only_on_demand(tcx) + } + pub fn explicit_linkage(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option { let def_id = match *self { TransItem::Fn(ref instance) => instance.def, @@ -352,8 +331,7 @@ impl<'a, 'tcx> TransItem<'tcx> { }, TransItem::Static(node_id) => { let def_id = hir_map.local_def_id(node_id); - let instance = Instance::new(def_id, - tcx.mk_substs(subst::Substs::empty())); + let instance = Instance::new(def_id, Substs::empty(tcx)); to_string_internal(tcx, "static ", instance) }, }; @@ -406,7 +384,7 @@ impl<'a, 'tcx> TransItem<'tcx> { /// Same as `unique_type_name()` but with the result pushed onto the given /// `output` parameter. pub fn push_unique_type_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - t: ty::Ty<'tcx>, + t: Ty<'tcx>, output: &mut String) { match t.sty { ty::TyBool => output.push_str("bool"), @@ -425,10 +403,9 @@ pub fn push_unique_type_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty::TyUint(ast::UintTy::U64) => output.push_str("u64"), ty::TyFloat(ast::FloatTy::F32) => output.push_str("f32"), ty::TyFloat(ast::FloatTy::F64) => output.push_str("f64"), - ty::TyStruct(adt_def, substs) | - ty::TyEnum(adt_def, substs) => { + ty::TyAdt(adt_def, substs) => { push_item_name(tcx, adt_def.did, output); - push_type_params(tcx, &substs.types, &[], output); + push_type_params(tcx, substs, &[], output); }, ty::TyTuple(component_types) => { output.push('('); @@ -476,13 +453,13 @@ pub fn push_unique_type_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, output.push(']'); }, ty::TyTrait(ref trait_data) => { - push_item_name(tcx, trait_data.principal.skip_binder().def_id, output); + push_item_name(tcx, trait_data.principal.def_id(), output); push_type_params(tcx, - &trait_data.principal.skip_binder().substs.types, - &trait_data.bounds.projection_bounds, + trait_data.principal.skip_binder().substs, + &trait_data.projection_bounds, output); }, - ty::TyFnDef(_, _, &ty::BareFnTy{ unsafety, abi, ref sig } ) | + ty::TyFnDef(.., &ty::BareFnTy{ unsafety, abi, ref sig } ) | ty::TyFnPtr(&ty::BareFnTy{ unsafety, abi, ref sig } ) => { if unsafety == hir::Unsafety::Unsafe { output.push_str("unsafe "); @@ -496,7 +473,7 @@ pub fn push_unique_type_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, output.push_str("fn("); - let sig = tcx.erase_late_bound_regions(sig); + let sig = tcx.erase_late_bound_regions_and_normalize(sig); if !sig.inputs.is_empty() { for ¶meter_type in &sig.inputs { push_unique_type_name(tcx, parameter_type, output); @@ -526,7 +503,7 @@ pub fn push_unique_type_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, output.push_str("{"); output.push_str(&format!("{}:{}", def_id.krate, def_id.index.as_usize())); output.push_str("}"); - push_type_params(tcx, &closure_substs.func_substs.types, &[], output); + push_type_params(tcx, closure_substs.func_substs, &[], output); } ty::TyError | ty::TyInfer(_) | @@ -561,23 +538,23 @@ fn push_item_name(tcx: TyCtxt, } fn push_type_params<'a, 'tcx>(tcx: 
TyCtxt<'a, 'tcx, 'tcx>, - types: &'tcx subst::VecPerParamSpace>, - projections: &[ty::PolyProjectionPredicate<'tcx>], + substs: &Substs<'tcx>, + projections: &[ty::PolyExistentialProjection<'tcx>], output: &mut String) { - if types.is_empty() && projections.is_empty() { + if substs.types().next().is_none() && projections.is_empty() { return; } output.push('<'); - for &type_parameter in types { + for type_parameter in substs.types() { push_unique_type_name(tcx, type_parameter, output); output.push_str(", "); } for projection in projections { let projection = projection.skip_binder(); - let name = &projection.projection_ty.item_name.as_str(); + let name = &projection.item_name.as_str(); output.push_str(name); output.push_str("="); push_unique_type_name(tcx, projection.ty, output); @@ -594,7 +571,7 @@ fn push_instance_as_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance<'tcx>, output: &mut String) { push_item_name(tcx, instance.def, output); - push_type_params(tcx, &instance.substs.types, &[], output); + push_type_params(tcx, instance.substs, &[], output); } pub fn def_id_to_string(tcx: TyCtxt, def_id: DefId) -> String { @@ -604,7 +581,7 @@ pub fn def_id_to_string(tcx: TyCtxt, def_id: DefId) -> String { } pub fn type_to_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: ty::Ty<'tcx>) + ty: Ty<'tcx>) -> String { let mut output = String::new(); push_unique_type_name(tcx, ty, &mut output); diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index 11fe9c98d9..cf897fc5a1 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -13,383 +13,51 @@ use llvm; use llvm::ValueRef; use base::*; -use base; use build::*; -use cleanup; -use cleanup::CleanupMethods; use common::*; -use consts; -use datum::*; use debuginfo::DebugLoc; -use expr::{Dest, Ignore, SaveIn}; -use expr; -use machine::llsize_of_alloc; -use type_::Type; -use type_of; -use value::Value; -use rustc::ty::{self, Ty}; - -use rustc::hir; -use rustc_const_eval::eval_length; - -use syntax::ast; -use syntax::parse::token::InternedString; - -#[derive(Copy, Clone, Debug)] -struct VecTypes<'tcx> { - unit_ty: Ty<'tcx>, - llunit_ty: Type -} - -pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - dest: expr::Dest) - -> Block<'blk, 'tcx> { - //! - // - // [...] allocates a fixed-size array and moves it around "by value". - // In this case, it means that the caller has already given us a location - // to store the array of the suitable size, so all we have to do is - // generate the content. - - debug!("trans_fixed_vstore(expr={:?}, dest={:?})", expr, dest); - - let vt = vec_types_from_expr(bcx, expr); - - return match dest { - Ignore => write_content(bcx, &vt, expr, expr, dest), - SaveIn(lldest) => { - // lldest will have type *[T x N], but we want the type *T, - // so use GEP to convert: - let lldest = StructGEP(bcx, lldest, 0); - write_content(bcx, &vt, expr, expr, SaveIn(lldest)) - } - }; -} - -/// &[...] allocates memory on the stack and writes the values into it, returning the vector (the -/// caller must make the reference). "..." is similar except that the memory can be statically -/// allocated and we return a reference (strings are always by-ref). 
-pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - slice_expr: &hir::Expr, - content_expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { +use rustc::ty::Ty; + +pub fn slice_for_each<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, + data_ptr: ValueRef, + unit_ty: Ty<'tcx>, + len: ValueRef, + f: F) + -> Block<'blk, 'tcx> where + F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>, +{ + let _icx = push_ctxt("tvec::slice_for_each"); let fcx = bcx.fcx; - let mut bcx = bcx; - - debug!("trans_slice_vec(slice_expr={:?})", - slice_expr); - - let vec_ty = node_id_type(bcx, slice_expr.id); - - // Handle the "..." case (returns a slice since strings are always unsized): - if let hir::ExprLit(ref lit) = content_expr.node { - if let ast::LitKind::Str(ref s, _) = lit.node { - let scratch = rvalue_scratch_datum(bcx, vec_ty, ""); - bcx = trans_lit_str(bcx, - content_expr, - s.clone(), - SaveIn(scratch.val)); - return DatumBlock::new(bcx, scratch.to_expr_datum()); - } - } - - // Handle the &[...] case: - let vt = vec_types_from_expr(bcx, content_expr); - let count = elements_required(bcx, content_expr); - debug!(" vt={:?}, count={}", vt, count); - let fixed_ty = bcx.tcx().mk_array(vt.unit_ty, count); - - // Always create an alloca even if zero-sized, to preserve - // the non-null invariant of the inner slice ptr - let llfixed; - // Issue 30018: ensure state is initialized as dropped if necessary. - if fcx.type_needs_drop(vt.unit_ty) { - llfixed = base::alloc_ty_init(bcx, fixed_ty, InitAlloca::Dropped, ""); + // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) + let zst = type_is_zero_size(bcx.ccx(), unit_ty); + let add = |bcx, a, b| if zst { + Add(bcx, a, b, DebugLoc::None) } else { - let uninit = InitAlloca::Uninit("fcx says vt.unit_ty is non-drop"); - llfixed = base::alloc_ty_init(bcx, fixed_ty, uninit, ""); - call_lifetime_start(bcx, llfixed); - }; - - if count > 0 { - // Arrange for the backing array to be cleaned up. - let cleanup_scope = cleanup::temporary_scope(bcx.tcx(), content_expr.id); - fcx.schedule_lifetime_end(cleanup_scope, llfixed); - fcx.schedule_drop_mem(cleanup_scope, llfixed, fixed_ty, None); - - // Generate the content into the backing array. - // llfixed has type *[T x N], but we want the type *T, - // so use GEP to convert - bcx = write_content(bcx, &vt, slice_expr, content_expr, - SaveIn(StructGEP(bcx, llfixed, 0))); + InBoundsGEP(bcx, a, &[b]) }; - immediate_rvalue_bcx(bcx, llfixed, vec_ty).to_expr_datumblock() -} - -/// Literal strings translate to slices into static memory. This is different from -/// trans_slice_vstore() above because it doesn't need to copy the content anywhere. 
-pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lit_expr: &hir::Expr, - str_lit: InternedString, - dest: Dest) - -> Block<'blk, 'tcx> { - debug!("trans_lit_str(lit_expr={:?}, dest={:?})", lit_expr, dest); - - match dest { - Ignore => bcx, - SaveIn(lldest) => { - let bytes = str_lit.len(); - let llbytes = C_uint(bcx.ccx(), bytes); - let llcstr = C_cstr(bcx.ccx(), str_lit, false); - let llcstr = consts::ptrcast(llcstr, Type::i8p(bcx.ccx())); - Store(bcx, llcstr, expr::get_dataptr(bcx, lldest)); - Store(bcx, llbytes, expr::get_meta(bcx, lldest)); - bcx - } - } -} - -fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - vt: &VecTypes<'tcx>, - vstore_expr: &hir::Expr, - content_expr: &hir::Expr, - dest: Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("tvec::write_content"); - let fcx = bcx.fcx; - let mut bcx = bcx; - - debug!("write_content(vt={:?}, dest={:?}, vstore_expr={:?})", - vt, dest, vstore_expr); - - match content_expr.node { - hir::ExprLit(ref lit) => { - match lit.node { - ast::LitKind::Str(ref s, _) => { - match dest { - Ignore => return bcx, - SaveIn(lldest) => { - let bytes = s.len(); - let llbytes = C_uint(bcx.ccx(), bytes); - let llcstr = C_cstr(bcx.ccx(), (*s).clone(), false); - if !bcx.unreachable.get() { - base::call_memcpy(&B(bcx), lldest, llcstr, llbytes, 1); - } - return bcx; - } - } - } - _ => { - span_bug!(content_expr.span, "unexpected evec content"); - } - } - } - hir::ExprVec(ref elements) => { - match dest { - Ignore => { - for element in elements { - bcx = expr::trans_into(bcx, &element, Ignore); - } - } - - SaveIn(lldest) => { - let temp_scope = fcx.push_custom_cleanup_scope(); - for (i, element) in elements.iter().enumerate() { - let lleltptr = GEPi(bcx, lldest, &[i]); - debug!("writing index {} with lleltptr={:?}", - i, Value(lleltptr)); - bcx = expr::trans_into(bcx, &element, - SaveIn(lleltptr)); - let scope = cleanup::CustomScope(temp_scope); - // Issue #30822: mark memory as dropped after running destructor - fcx.schedule_drop_and_fill_mem(scope, lleltptr, vt.unit_ty, None); - } - fcx.pop_custom_cleanup_scope(temp_scope); - } - } - return bcx; - } - hir::ExprRepeat(ref element, ref count_expr) => { - match dest { - Ignore => { - return expr::trans_into(bcx, &element, Ignore); - } - SaveIn(lldest) => { - match eval_length(bcx.tcx(), &count_expr, "repeat count").unwrap() { - 0 => expr::trans_into(bcx, &element, Ignore), - 1 => expr::trans_into(bcx, &element, SaveIn(lldest)), - count => { - let elem = unpack_datum!(bcx, expr::trans(bcx, &element)); - let bcx = iter_vec_loop(bcx, lldest, vt, - C_uint(bcx.ccx(), count), - |set_bcx, lleltptr, _| { - elem.shallow_copy(set_bcx, lleltptr) - }); - bcx - } - } - } - } - } - _ => { - span_bug!(content_expr.span, "unexpected vec content"); - } - } -} - -fn vec_types_from_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, vec_expr: &hir::Expr) - -> VecTypes<'tcx> { - let vec_ty = node_id_type(bcx, vec_expr.id); - vec_types(bcx, vec_ty.sequence_element_type(bcx.tcx())) -} - -fn vec_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, unit_ty: Ty<'tcx>) - -> VecTypes<'tcx> { - VecTypes { - unit_ty: unit_ty, - llunit_ty: type_of::type_of(bcx.ccx(), unit_ty) - } -} - -fn elements_required(bcx: Block, content_expr: &hir::Expr) -> usize { - //! 
Figure out the number of elements we need to store this content - - match content_expr.node { - hir::ExprLit(ref lit) => { - match lit.node { - ast::LitKind::Str(ref s, _) => s.len(), - _ => { - span_bug!(content_expr.span, "unexpected evec content") - } - } - }, - hir::ExprVec(ref es) => es.len(), - hir::ExprRepeat(_, ref count_expr) => { - eval_length(bcx.tcx(), &count_expr, "repeat count").unwrap() - } - _ => span_bug!(content_expr.span, "unexpected vec content") - } -} - -/// Converts a fixed-length vector into the slice pair. The vector should be stored in `llval` -/// which should be by ref. -pub fn get_fixed_base_and_len(bcx: Block, - llval: ValueRef, - vec_length: usize) - -> (ValueRef, ValueRef) { - let ccx = bcx.ccx(); - - let base = expr::get_dataptr(bcx, llval); - let len = C_uint(ccx, vec_length); - (base, len) -} - -/// Converts a vector into the slice pair. The vector should be stored in `llval` which should be -/// by-reference. If you have a datum, you would probably prefer to call -/// `Datum::get_base_and_len()` which will handle any conversions for you. -pub fn get_base_and_len<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - llval: ValueRef, - vec_ty: Ty<'tcx>) - -> (ValueRef, ValueRef) { - match vec_ty.sty { - ty::TyArray(_, n) => get_fixed_base_and_len(bcx, llval, n), - ty::TySlice(_) | ty::TyStr => { - let base = Load(bcx, expr::get_dataptr(bcx, llval)); - let len = Load(bcx, expr::get_meta(bcx, llval)); - (base, len) - } - - // Only used for pattern matching. - ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => { - let inner = if type_is_sized(bcx.tcx(), ty) { - Load(bcx, llval) - } else { - llval - }; - get_base_and_len(bcx, inner, ty) - }, - _ => bug!("unexpected type in get_base_and_len"), - } -} + let header_bcx = fcx.new_block("slice_loop_header"); + let body_bcx = fcx.new_block("slice_loop_body"); + let next_bcx = fcx.new_block("slice_loop_next"); -fn iter_vec_loop<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - data_ptr: ValueRef, - vt: &VecTypes<'tcx>, - count: ValueRef, - f: F) - -> Block<'blk, 'tcx> where - F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>, -{ - let _icx = push_ctxt("tvec::iter_vec_loop"); - - if bcx.unreachable.get() { - return bcx; - } - - let fcx = bcx.fcx; - let loop_bcx = fcx.new_temp_block("expr_repeat"); - let next_bcx = fcx.new_temp_block("expr_repeat: next"); - - Br(bcx, loop_bcx.llbb, DebugLoc::None); - - let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(), - &[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]); - - let bcx = loop_bcx; - - let lleltptr = if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 { - data_ptr + let start = if zst { + C_uint(bcx.ccx(), 0 as usize) } else { - InBoundsGEP(bcx, data_ptr, &[loop_counter]) + data_ptr }; - let bcx = f(bcx, lleltptr, vt.unit_ty); - let plusone = Add(bcx, loop_counter, C_uint(bcx.ccx(), 1usize), DebugLoc::None); - AddIncomingToPhi(loop_counter, plusone, bcx.llbb); + let end = add(bcx, start, len); - let cond_val = ICmp(bcx, llvm::IntNE, plusone, count, DebugLoc::None); - CondBr(bcx, cond_val, loop_bcx.llbb, next_bcx.llbb, DebugLoc::None); + Br(bcx, header_bcx.llbb, DebugLoc::None); + let current = Phi(header_bcx, val_ty(start), &[start], &[bcx.llbb]); - next_bcx -} + let keep_going = + ICmp(header_bcx, llvm::IntNE, current, end, DebugLoc::None); + CondBr(header_bcx, keep_going, body_bcx.llbb, next_bcx.llbb, DebugLoc::None); -pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - data_ptr: ValueRef, - unit_ty: Ty<'tcx>, - len: ValueRef, - f: F) - -> Block<'blk, 'tcx> where 
- F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>, -{ - let _icx = push_ctxt("tvec::iter_vec_raw"); - let fcx = bcx.fcx; - - let vt = vec_types(bcx, unit_ty); - - if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 { - // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) - iter_vec_loop(bcx, data_ptr, &vt, len, f) - } else { - // Calculate the last pointer address we want to handle. - let data_end_ptr = InBoundsGEP(bcx, data_ptr, &[len]); - - // Now perform the iteration. - let header_bcx = fcx.new_temp_block("iter_vec_loop_header"); - Br(bcx, header_bcx.llbb, DebugLoc::None); - let data_ptr = - Phi(header_bcx, val_ty(data_ptr), &[data_ptr], &[bcx.llbb]); - let not_yet_at_end = - ICmp(header_bcx, llvm::IntNE, data_ptr, data_end_ptr, DebugLoc::None); - let body_bcx = fcx.new_temp_block("iter_vec_loop_body"); - let next_bcx = fcx.new_temp_block("iter_vec_next"); - CondBr(header_bcx, not_yet_at_end, body_bcx.llbb, next_bcx.llbb, DebugLoc::None); - let body_bcx = f(body_bcx, data_ptr, unit_ty); - AddIncomingToPhi(data_ptr, InBoundsGEP(body_bcx, data_ptr, - &[C_int(bcx.ccx(), 1)]), - body_bcx.llbb); - Br(body_bcx, header_bcx.llbb, DebugLoc::None); - next_bcx - } + let body_bcx = f(body_bcx, if zst { data_ptr } else { current }); + let next = add(body_bcx, current, C_uint(bcx.ccx(), 1usize)); + AddIncomingToPhi(current, next, body_bcx.llbb); + Br(body_bcx, header_bcx.llbb, DebugLoc::None); + next_bcx } diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs index d191591e08..03a71827b4 100644 --- a/src/librustc_trans/type_.rs +++ b/src/librustc_trans/type_.rs @@ -18,6 +18,7 @@ use context::CrateContext; use util::nodemap::FnvHashMap; use syntax::ast; +use rustc::ty::layout; use std::ffi::CString; use std::fmt; @@ -299,6 +300,26 @@ impl Type { llvm::LLVMGetIntTypeWidth(self.to_ref()) as u64 } } + + pub fn from_integer(cx: &CrateContext, i: layout::Integer) -> Type { + use rustc::ty::layout::Integer::*; + match i { + I1 => Type::i1(cx), + I8 => Type::i8(cx), + I16 => Type::i16(cx), + I32 => Type::i32(cx), + I64 => Type::i64(cx), + } + } + + pub fn from_primitive(ccx: &CrateContext, p: layout::Primitive) -> Type { + match p { + layout::Int(i) => Type::from_integer(ccx, i), + layout::F32 => Type::f32(ccx), + layout::F64 => Type::f64(ccx), + layout::Pointer => bug!("It is not possible to convert Pointer directly to Type.") + } + } } /* Memory-managed object interface to type handles. */ diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index e6794149fc..132b0a910b 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -11,29 +11,17 @@ #![allow(non_camel_case_types)] use rustc::hir::def_id::DefId; -use rustc::ty::subst; use abi::FnType; use adt; use common::*; use machine; -use rustc::traits::Reveal; use rustc::ty::{self, Ty, TypeFoldable}; +use rustc::ty::subst::Substs; use type_::Type; use syntax::ast; -// LLVM doesn't like objects that are too big. Issue #17913 -fn ensure_array_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - llet: Type, - size: machine::llsize, - scapegoat: Ty<'tcx>) { - let esz = machine::llsize_of_alloc(ccx, llet); - match esz.checked_mul(size) { - Some(n) if n < ccx.obj_size_bound() => {} - _ => { ccx.report_overbig_object(scapegoat) } - } -} // A "sizing type" is an LLVM type, the size and alignment of which are // guaranteed to be equivalent to what you would get out of `type_of()`. 
It's @@ -82,7 +70,6 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ ty::TyArray(ty, size) => { let llty = sizing_type_of(cx, ty); let size = size as u64; - ensure_array_fits_in_address_space(cx, llty, size, t); Type::array(&llty, size) } @@ -90,27 +77,20 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ Type::nil(cx) } - ty::TyTuple(..) | ty::TyEnum(..) | ty::TyClosure(..) => { - let repr = adt::represent_type(cx, t); - adt::sizing_type_of(cx, &repr, false) + ty::TyAdt(..) if t.is_simd() => { + let e = t.simd_type(cx.tcx()); + if !e.is_machine() { + cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \ + a non-machine element type `{}`", + t, e)) + } + let llet = type_of(cx, e); + let n = t.simd_size(cx.tcx()) as u64; + Type::vector(&llet, n) } - ty::TyStruct(..) => { - if t.is_simd() { - let e = t.simd_type(cx.tcx()); - if !e.is_machine() { - cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \ - a non-machine element type `{}`", - t, e)) - } - let llet = type_of(cx, e); - let n = t.simd_size(cx.tcx()) as u64; - ensure_array_fits_in_address_space(cx, llet, n, t); - Type::vector(&llet, n) - } else { - let repr = adt::represent_type(cx, t); - adt::sizing_type_of(cx, &repr, false) - } + ty::TyTuple(..) | ty::TyAdt(..) | ty::TyClosure(..) => { + adt::sizing_type_of(cx, t, false) } ty::TyProjection(..) | ty::TyInfer(..) | ty::TyParam(..) | @@ -125,37 +105,31 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ cx.llsizingtypes().borrow_mut().insert(t, llsizingty); // FIXME(eddyb) Temporary sanity check for ty::layout. - let layout = cx.tcx().normalizing_infer_ctxt(Reveal::All).enter(|infcx| { - t.layout(&infcx) - }); - match layout { - Ok(layout) => { - if !type_is_sized(cx.tcx(), t) { - if !layout.is_unsized() { - bug!("layout should be unsized for type `{}` / {:#?}", - t, layout); - } - - // Unsized types get turned into a fat pointer for LLVM. - return llsizingty; - } - let r = layout.size(&cx.tcx().data_layout).bytes(); - let l = machine::llsize_of_alloc(cx, llsizingty); - if r != l { - bug!("size differs (rustc: {}, llvm: {}) for type `{}` / {:#?}", - r, l, t, layout); - } - let r = layout.align(&cx.tcx().data_layout).abi(); - let l = machine::llalign_of_min(cx, llsizingty) as u64; - if r != l { - bug!("align differs (rustc: {}, llvm: {}) for type `{}` / {:#?}", - r, l, t, layout); - } - } - Err(e) => { - bug!("failed to get layout for `{}`: {}", t, e); + let layout = cx.layout_of(t); + if !type_is_sized(cx.tcx(), t) { + if !layout.is_unsized() { + bug!("layout should be unsized for type `{}` / {:#?}", + t, layout); } + + // Unsized types get turned into a fat pointer for LLVM. + return llsizingty; + } + + let r = layout.size(&cx.tcx().data_layout).bytes(); + let l = machine::llsize_of_alloc(cx, llsizingty); + if r != l { + bug!("size differs (rustc: {}, llvm: {}) for type `{}` / {:#?}", + r, l, t, layout); + } + + let r = layout.align(&cx.tcx().data_layout).abi(); + let l = machine::llalign_of_min(cx, llsizingty) as u64; + if r != l { + bug!("align differs (rustc: {}, llvm: {}) for type `{}` / {:#?}", + r, l, t, layout); } + llsizingty } @@ -251,25 +225,10 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ty::TyUint(t) => Type::uint_from_ty(cx, t), ty::TyFloat(t) => Type::float_from_ty(cx, t), ty::TyNever => Type::nil(cx), - ty::TyEnum(def, ref substs) => { - // Only create the named struct, but don't fill it in. 
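Editorial note: the sanity check that `sizing_type_of` keeps (now via `cx.layout_of(t)`) asserts that the size and alignment rustc computed for a type's layout agree with what LLVM reports for the sizing type, and calls `bug!` otherwise. A self-contained analogue of that cross-check, using `std::mem` on a `#[repr(C)]` struct instead of the trans machinery (struct and helper names are mine):

    use std::mem::{align_of, size_of};

    #[repr(C)]
    struct Demo { a: u8, b: u32, c: u16 }

    fn round_up(off: usize, align: usize) -> usize { (off + align - 1) / align * align }

    fn main() {
        // Hand-compute a C-style layout from the field sizes/alignments...
        let fields = [(size_of::<u8>(), align_of::<u8>()),
                      (size_of::<u32>(), align_of::<u32>()),
                      (size_of::<u16>(), align_of::<u16>())];
        let mut off = 0;
        let mut align = 1;
        for &(sz, al) in &fields {
            off = round_up(off, al) + sz;
            align = align.max(al);
        }
        let size = round_up(off, align);
        // ...and check it against what the compiler actually uses. A mismatch
        // here is the situation the `bug!("size differs ...")` guards against.
        assert_eq!((size, align), (size_of::<Demo>(), align_of::<Demo>()));
    }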
We - // fill it in *after* placing it into the type cache. This - // avoids creating more than one copy of the enum when one - // of the enum's variants refers to the enum itself. - let repr = adt::represent_type(cx, t); - let tps = substs.types.get_slice(subst::TypeSpace); - let name = llvm_type_name(cx, def.did, tps); - adt::incomplete_type_of(cx, &repr, &name[..]) - } ty::TyClosure(..) => { // Only create the named struct, but don't fill it in. We // fill it in *after* placing it into the type cache. - let repr = adt::represent_type(cx, t); - // Unboxed closures can have substitutions in all spaces - // inherited from their environment, so we use entire - // contents of the VecPerParamSpace to construct the llvm - // name - adt::incomplete_type_of(cx, &repr, "closure") + adt::incomplete_type_of(cx, t, "closure") } ty::TyBox(ty) | @@ -292,11 +251,6 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ty::TyArray(ty, size) => { let size = size as u64; - // we must use `sizing_type_of` here as the type may - // not be fully initialized. - let szty = sizing_type_of(cx, ty); - ensure_array_fits_in_address_space(cx, szty, size, t); - let llty = in_memory_type_of(cx, ty); Type::array(&llty, size) } @@ -310,36 +264,31 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ty::TyFnDef(..) => Type::nil(cx), ty::TyFnPtr(f) => { - let sig = cx.tcx().erase_late_bound_regions(&f.sig); - let sig = cx.tcx().normalize_associated_type(&sig); + let sig = cx.tcx().erase_late_bound_regions_and_normalize(&f.sig); FnType::new(cx, f.abi, &sig, &[]).llvm_type(cx).ptr_to() } ty::TyTuple(ref tys) if tys.is_empty() => Type::nil(cx), ty::TyTuple(..) => { - let repr = adt::represent_type(cx, t); - adt::type_of(cx, &repr) + adt::type_of(cx, t) } - ty::TyStruct(def, ref substs) => { - if t.is_simd() { - let e = t.simd_type(cx.tcx()); - if !e.is_machine() { - cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \ - a non-machine element type `{}`", - t, e)) - } - let llet = in_memory_type_of(cx, e); - let n = t.simd_size(cx.tcx()) as u64; - ensure_array_fits_in_address_space(cx, llet, n, t); - Type::vector(&llet, n) - } else { - // Only create the named struct, but don't fill it in. We fill it - // in *after* placing it into the type cache. This prevents - // infinite recursion with recursive struct types. - let repr = adt::represent_type(cx, t); - let tps = substs.types.get_slice(subst::TypeSpace); - let name = llvm_type_name(cx, def.did, tps); - adt::incomplete_type_of(cx, &repr, &name[..]) + ty::TyAdt(..) if t.is_simd() => { + let e = t.simd_type(cx.tcx()); + if !e.is_machine() { + cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \ + a non-machine element type `{}`", + t, e)) } + let llet = in_memory_type_of(cx, e); + let n = t.simd_size(cx.tcx()) as u64; + Type::vector(&llet, n) + } + ty::TyAdt(def, substs) => { + // Only create the named struct, but don't fill it in. We + // fill it in *after* placing it into the type cache. This + // avoids creating more than one copy of the enum when one + // of the enum's variants refers to the enum itself. + let name = llvm_type_name(cx, def.did, substs); + adt::incomplete_type_of(cx, t, &name[..]) } ty::TyInfer(..) | @@ -355,10 +304,8 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> // If this was an enum or struct, fill in the type now. match t.sty { - ty::TyEnum(..) | ty::TyStruct(..) | ty::TyClosure(..) 
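Editorial note: the "create the named struct, fill it in after caching" comments above exist because of recursive ADTs, where a variant or field mentions the type being lowered. A surface-level example of the kind of type that motivates the dance (the type itself is mine, not from the patch):

    // Lowering `List` to an LLVM struct must not recurse into `List` again
    // while the type is still being built, hence cache-then-fill.
    enum List {
        Cons(u32, Box<List>),
        Nil,
    }

    fn sum(l: &List) -> u32 {
        match *l {
            List::Cons(x, ref rest) => x + sum(rest),
            List::Nil => 0,
        }
    }

    fn main() {
        let l = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil))));
        assert_eq!(sum(&l), 3);
    }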
- if !t.is_simd() => { - let repr = adt::represent_type(cx, t); - adt::finish_type_of(cx, &repr, &mut llty); + ty::TyAdt(..) | ty::TyClosure(..) if !t.is_simd() => { + adt::finish_type_of(cx, t, &mut llty); } _ => () } @@ -368,23 +315,23 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> pub fn align_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> machine::llalign { - let llty = sizing_type_of(cx, t); - machine::llalign_of_min(cx, llty) + let layout = cx.layout_of(t); + layout.align(&cx.tcx().data_layout).abi() as machine::llalign } fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, did: DefId, - tps: &[Ty<'tcx>]) + substs: &Substs<'tcx>) -> String { let base = cx.tcx().item_path_str(did); - let strings: Vec = tps.iter().map(|t| t.to_string()).collect(); + let strings: Vec = substs.types().map(|t| t.to_string()).collect(); let tstr = if strings.is_empty() { base } else { format!("{}<{}>", base, strings.join(", ")) }; - if did.krate == 0 { + if did.is_local() { tstr } else { format!("{}.{}", did.krate, tstr) diff --git a/src/librustc_typeck/astconv.rs b/src/librustc_typeck/astconv.rs index ed67c9fbe3..f5e289c330 100644 --- a/src/librustc_typeck/astconv.rs +++ b/src/librustc_typeck/astconv.rs @@ -55,8 +55,7 @@ use hir::def_id::DefId; use hir::print as pprust; use middle::resolve_lifetime as rl; use rustc::lint; -use rustc::ty::subst::{FnSpace, TypeSpace, SelfSpace, Subst, Substs, ParamSpace}; -use rustc::ty::subst::VecPerParamSpace; +use rustc::ty::subst::{Subst, Substs}; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt, ToPredicate, TypeFoldable}; use rustc::ty::wf::object_region_bounds; @@ -82,6 +81,10 @@ pub trait AstConv<'gcx, 'tcx> { /// A cache used for the result of `ast_ty_to_ty_cache` fn ast_ty_to_ty_cache(&self) -> &RefCell>>; + /// Returns the generic type and lifetime parameters for an item. + fn get_generics(&self, span: Span, id: DefId) + -> Result<&'tcx ty::Generics<'tcx>, ErrorReported>; + /// Identify the type scheme for an item with a type, like a type /// alias, fn, or struct. This allows you to figure out the set of /// type parameters defined on the item. @@ -116,11 +119,15 @@ pub trait AstConv<'gcx, 'tcx> { fn get_free_substs(&self) -> Option<&Substs<'tcx>>; /// What type should we use when a type is omitted? - fn ty_infer(&self, - param_and_substs: Option>, - substs: Option<&mut Substs<'tcx>>, - space: Option, - span: Span) -> Ty<'tcx>; + fn ty_infer(&self, span: Span) -> Ty<'tcx>; + + /// Same as ty_infer, but with a known type parameter definition. + fn ty_infer_for_def(&self, + _def: &ty::TypeParameterDef<'tcx>, + _substs: &Substs<'tcx>, + span: Span) -> Ty<'tcx> { + self.ty_infer(span) + } /// Projecting an associated type from a (potentially) /// higher-ranked trait reference is more complicated, because of @@ -166,8 +173,14 @@ struct ConvertedBinding<'tcx> { type TraitAndProjections<'tcx> = (ty::PolyTraitRef<'tcx>, Vec>); -pub fn ast_region_to_region(tcx: TyCtxt, lifetime: &hir::Lifetime) - -> ty::Region { +/// Dummy type used for the `Self` of a `TraitRef` created for converting +/// a trait object, and which gets removed in `ExistentialTraitRef`. +/// This type must not appear anywhere in other converted types. 
+const TRAIT_OBJECT_DUMMY_SELF: ty::TypeVariants<'static> = ty::TyInfer(ty::FreshTy(0)); + +pub fn ast_region_to_region<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + lifetime: &hir::Lifetime) + -> &'tcx ty::Region { let r = match tcx.named_region_map.defs.get(&lifetime.id) { None => { // should have been recorded by the `resolve_lifetime` pass @@ -195,9 +208,8 @@ pub fn ast_region_to_region(tcx: TyCtxt, lifetime: &hir::Lifetime) issue_32330)) } - Some(&rl::DefEarlyBoundRegion(space, index, _)) => { + Some(&rl::DefEarlyBoundRegion(index, _)) => { ty::ReEarlyBound(ty::EarlyBoundRegion { - space: space, index: index, name: lifetime.name }) @@ -227,7 +239,7 @@ pub fn ast_region_to_region(tcx: TyCtxt, lifetime: &hir::Lifetime) lifetime.id, r); - r + tcx.mk_region(r) } fn report_elision_failure( @@ -302,14 +314,14 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { pub fn opt_ast_region_to_region(&self, rscope: &RegionScope, default_span: Span, - opt_lifetime: &Option) -> ty::Region + opt_lifetime: &Option) -> &'tcx ty::Region { let r = match *opt_lifetime { Some(ref lifetime) => { ast_region_to_region(self.tcx(), lifetime) } - None => match rscope.anon_regions(default_span, 1) { + None => self.tcx().mk_region(match rscope.anon_regions(default_span, 1) { Ok(rs) => rs[0], Err(params) => { let ampersand_span = Span { hi: default_span.lo, ..default_span}; @@ -324,7 +336,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { err.emit(); ty::ReStatic } - } + }) }; debug!("opt_ast_region_to_region(opt_lifetime={:?}) yields {:?}", @@ -340,66 +352,87 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { rscope: &RegionScope, span: Span, param_mode: PathParamMode, - decl_generics: &ty::Generics<'tcx>, + def_id: DefId, item_segment: &hir::PathSegment) - -> Substs<'tcx> + -> &'tcx Substs<'tcx> { let tcx = self.tcx(); - // ast_path_substs() is only called to convert paths that are - // known to refer to traits, types, or structs. In these cases, - // all type parameters defined for the item being referenced will - // be in the TypeSpace or SelfSpace. - // - // Note: in the case of traits, the self parameter is also - // defined, but we don't currently create a `type_param_def` for - // `Self` because it is implicit. - assert!(decl_generics.regions.all(|d| d.space == TypeSpace)); - assert!(decl_generics.types.all(|d| d.space != FnSpace)); - - let (regions, types, assoc_bindings) = match item_segment.parameters { - hir::AngleBracketedParameters(ref data) => { - self.convert_angle_bracketed_parameters(rscope, span, decl_generics, data) - } + match item_segment.parameters { + hir::AngleBracketedParameters(_) => {} hir::ParenthesizedParameters(..) 
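Editorial note: `TRAIT_OBJECT_DUMMY_SELF` exists because an object type's principal trait reference has no concrete `Self`; the conversion first builds an ordinary `TraitRef` with this placeholder and later erases it into an `ExistentialTraitRef`. A hedged, language-level illustration of the "no concrete Self" situation, written with the modern `dyn` syntax that this 2016 patch predates:

    // Two different concrete iterators hide behind the same object type; the
    // trait reference `Iterator<Item = String>` in the return type has no
    // single Self type, which is exactly what the dummy-Self/erasure handles.
    fn make_iter(flag: bool) -> Box<dyn Iterator<Item = String>> {
        if flag {
            Box::new(vec!["a".to_string()].into_iter())
        } else {
            Box::new(std::iter::empty())
        }
    }

    fn main() {
        assert_eq!(make_iter(true).count(), 1);
        assert_eq!(make_iter(false).count(), 0);
    }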
=> { struct_span_err!(tcx.sess, span, E0214, "parenthesized parameters may only be used with a trait") .span_label(span, &format!("only traits may use parentheses")) .emit(); - let ty_param_defs = decl_generics.types.get_slice(TypeSpace); - (Substs::empty(), - ty_param_defs.iter().map(|_| tcx.types.err).collect(), - vec![]) + return Substs::for_item(tcx, def_id, |_, _| { + tcx.mk_region(ty::ReStatic) + }, |_, _| { + tcx.types.err + }); } - }; + } + + let (substs, assoc_bindings) = + self.create_substs_for_ast_path(rscope, + span, + param_mode, + def_id, + &item_segment.parameters, + None); assoc_bindings.first().map(|b| self.tcx().prohibit_projection(b.span)); - self.create_substs_for_ast_path(span, - param_mode, - decl_generics, - None, - types, - regions) + substs } - fn create_region_substs(&self, + /// Given the type/region arguments provided to some path (along with + /// an implicit Self, if this is a trait reference) returns the complete + /// set of substitutions. This may involve applying defaulted type parameters. + /// + /// Note that the type listing given here is *exactly* what the user provided. + fn create_substs_for_ast_path(&self, rscope: &RegionScope, span: Span, - decl_generics: &ty::Generics<'tcx>, - regions_provided: Vec) - -> Substs<'tcx> + param_mode: PathParamMode, + def_id: DefId, + parameters: &hir::PathParameters, + self_ty: Option>) + -> (&'tcx Substs<'tcx>, Vec>) { let tcx = self.tcx(); + debug!("create_substs_for_ast_path(def_id={:?}, self_ty={:?}, \ + parameters={:?})", + def_id, self_ty, parameters); + + let (lifetimes, num_types_provided) = match *parameters { + hir::AngleBracketedParameters(ref data) => { + if param_mode == PathParamMode::Optional && data.types.is_empty() { + (&data.lifetimes[..], None) + } else { + (&data.lifetimes[..], Some(data.types.len())) + } + } + hir::ParenthesizedParameters(_) => (&[][..], Some(1)) + }; + // If the type is parameterized by this region, then replace this // region with the current anon region binding (in other words, // whatever & would get replaced with). - let expected_num_region_params = decl_generics.regions.len(TypeSpace); - let supplied_num_region_params = regions_provided.len(); + let decl_generics = match self.get_generics(span, def_id) { + Ok(generics) => generics, + Err(ErrorReported) => { + // No convenient way to recover from a cycle here. Just bail. Sorry! + self.tcx().sess.abort_if_errors(); + bug!("ErrorReported returned, but no errors reports?") + } + }; + let expected_num_region_params = decl_generics.regions.len(); + let supplied_num_region_params = lifetimes.len(); let regions = if expected_num_region_params == supplied_num_region_params { - regions_provided + lifetimes.iter().map(|l| *ast_region_to_region(tcx, l)).collect() } else { let anon_regions = rscope.anon_regions(span, expected_num_region_params); @@ -415,184 +448,114 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { Err(_) => (0..expected_num_region_params).map(|_| ty::ReStatic).collect() } }; - Substs::new_type(vec![], regions) - } - /// Given the type/region arguments provided to some path (along with - /// an implicit Self, if this is a trait reference) returns the complete - /// set of substitutions. This may involve applying defaulted type parameters. - /// - /// Note that the type listing given here is *exactly* what the user provided. - /// - /// The `region_substs` should be the result of `create_region_substs` - /// -- that is, a substitution with no types but the correct number of - /// regions. 
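Editorial note: "applying defaulted type parameters" in the new `create_substs_for_ast_path` doc comment is the ordinary surface feature shown below; supplying fewer arguments than the declaration accepts makes the missing ones come from their defaults (a sketch, using `HashMap`'s hasher default as the example):

    use std::collections::HashMap;
    use std::collections::hash_map::RandomState;

    // `HashMap` is declared roughly as `HashMap<K, V, S = RandomState>`, so a
    // path with only two arguments gets the third filled in from the default.
    fn lookup(m: &HashMap<String, u32>, k: &str) -> Option<u32> {
        m.get(k).copied()
    }

    // Spelling the default out names the same type.
    fn lookup_explicit(m: &HashMap<String, u32, RandomState>, k: &str) -> Option<u32> {
        m.get(k).copied()
    }

    fn main() {
        let mut m = HashMap::new();
        m.insert("answer".to_string(), 42);
        assert_eq!(lookup(&m, "answer"), Some(42));
        assert_eq!(lookup_explicit(&m, "answer"), Some(42));
    }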
- fn create_substs_for_ast_path(&self, - span: Span, - param_mode: PathParamMode, - decl_generics: &ty::Generics<'tcx>, - self_ty: Option>, - types_provided: Vec>, - region_substs: Substs<'tcx>) - -> Substs<'tcx> - { - let tcx = self.tcx(); + // If a self-type was declared, one should be provided. + assert_eq!(decl_generics.has_self, self_ty.is_some()); - debug!("create_substs_for_ast_path(decl_generics={:?}, self_ty={:?}, \ - types_provided={:?}, region_substs={:?})", - decl_generics, self_ty, types_provided, - region_substs); - - assert_eq!(region_substs.regions.len(TypeSpace), decl_generics.regions.len(TypeSpace)); - assert!(region_substs.types.is_empty()); - - // Convert the type parameters supplied by the user. - let ty_param_defs = decl_generics.types.get_slice(TypeSpace); - let formal_ty_param_count = ty_param_defs.len(); - let required_ty_param_count = ty_param_defs.iter() - .take_while(|x| x.default.is_none()) - .count(); - - let mut type_substs = self.get_type_substs_for_defs(span, - types_provided, - param_mode, - ty_param_defs, - region_substs.clone(), - self_ty); - - let supplied_ty_param_count = type_substs.len(); - check_type_argument_count(self.tcx(), span, supplied_ty_param_count, - required_ty_param_count, formal_ty_param_count); - - if supplied_ty_param_count < required_ty_param_count { - while type_substs.len() < required_ty_param_count { - type_substs.push(tcx.types.err); - } - } else if supplied_ty_param_count > formal_ty_param_count { - type_substs.truncate(formal_ty_param_count); + // Check the number of type parameters supplied by the user. + if let Some(num_provided) = num_types_provided { + let ty_param_defs = &decl_generics.types[self_ty.is_some() as usize..]; + check_type_argument_count(tcx, span, num_provided, ty_param_defs); } - assert!(type_substs.len() >= required_ty_param_count && - type_substs.len() <= formal_ty_param_count); - - let mut substs = region_substs; - substs.types.extend(TypeSpace, type_substs.into_iter()); - match self_ty { - None => { - // If no self-type is provided, it's still possible that - // one was declared, because this could be an object type. + let is_object = self_ty.map_or(false, |ty| ty.sty == TRAIT_OBJECT_DUMMY_SELF); + let default_needs_object_self = |p: &ty::TypeParameterDef<'tcx>| { + if let Some(ref default) = p.default { + if is_object && default.has_self_ty() { + // There is no suitable inference default for a type parameter + // that references self, in an object type. + return true; + } } - Some(ty) => { - // If a self-type is provided, one should have been - // "declared" (in other words, this should be a - // trait-ref). - assert!(decl_generics.types.get_self().is_some()); - substs.types.push(SelfSpace, ty); + + false + }; + + let mut output_assoc_binding = None; + let substs = Substs::for_item(tcx, def_id, |def, _| { + let i = def.index as usize - self_ty.is_some() as usize; + tcx.mk_region(regions[i]) + }, |def, substs| { + let i = def.index as usize; + + // Handle Self first, so we can adjust the index to match the AST. + if let (0, Some(ty)) = (i, self_ty) { + return ty; } - } - let actual_supplied_ty_param_count = substs.types.len(TypeSpace); - for param in &ty_param_defs[actual_supplied_ty_param_count..] { - if let Some(default) = param.default { + let i = i - self_ty.is_some() as usize - decl_generics.regions.len(); + if num_types_provided.map_or(false, |n| i < n) { + // A provided type parameter. 
+ match *parameters { + hir::AngleBracketedParameters(ref data) => { + self.ast_ty_arg_to_ty(rscope, Some(def), substs, &data.types[i]) + } + hir::ParenthesizedParameters(ref data) => { + assert_eq!(i, 0); + let (ty, assoc) = + self.convert_parenthesized_parameters(rscope, substs, data); + output_assoc_binding = Some(assoc); + ty + } + } + } else if num_types_provided.is_none() { + // No type parameters were provided, we can infer all. + let ty_var = if !default_needs_object_self(def) { + self.ty_infer_for_def(def, substs, span) + } else { + self.ty_infer(span) + }; + ty_var + } else if let Some(default) = def.default { + // No type parameter provided, but a default exists. + // If we are converting an object type, then the // `Self` parameter is unknown. However, some of the // other type parameters may reference `Self` in their // defaults. This will lead to an ICE if we are not // careful! - if self_ty.is_none() && default.has_self_ty() { - span_err!(tcx.sess, span, E0393, - "the type parameter `{}` must be explicitly specified \ - in an object type because its default value `{}` references \ - the type `Self`", - param.name, - default); - substs.types.push(TypeSpace, tcx.types.err); + if default_needs_object_self(def) { + struct_span_err!(tcx.sess, span, E0393, + "the type parameter `{}` must be explicitly specified", + def.name) + .span_label(span, &format!("missing reference to `{}`", def.name)) + .note(&format!("because of the default `Self` reference, \ + type parameters must be specified on object types")) + .emit(); + tcx.types.err } else { // This is a default type parameter. - let default = default.subst_spanned(tcx, - &substs, - Some(span)); - substs.types.push(TypeSpace, default); + default.subst_spanned(tcx, substs, Some(span)) } } else { - span_bug!(span, "extra parameter without default"); + // We've already errored above about the mismatch. + tcx.types.err } - } - - debug!("create_substs_for_ast_path(decl_generics={:?}, self_ty={:?}) -> {:?}", - decl_generics, self_ty, substs); - - substs - } + }); - /// Returns types_provided if it is not empty, otherwise populating the - /// type parameters with inference variables as appropriate. - fn get_type_substs_for_defs(&self, - span: Span, - types_provided: Vec>, - param_mode: PathParamMode, - ty_param_defs: &[ty::TypeParameterDef<'tcx>], - mut substs: Substs<'tcx>, - self_ty: Option>) - -> Vec> - { - fn default_type_parameter<'tcx>(p: &ty::TypeParameterDef<'tcx>, self_ty: Option>) - -> Option> - { - if let Some(ref default) = p.default { - if self_ty.is_none() && default.has_self_ty() { - // There is no suitable inference default for a type parameter - // that references self with no self-type provided. - return None; - } + let assoc_bindings = match *parameters { + hir::AngleBracketedParameters(ref data) => { + data.bindings.iter().map(|b| { + ConvertedBinding { + item_name: b.name, + ty: self.ast_ty_to_ty(rscope, &b.ty), + span: b.span + } + }).collect() } + hir::ParenthesizedParameters(ref data) => { + vec![output_assoc_binding.unwrap_or_else(|| { + // This is an error condition, but we should + // get the associated type binding anyway. 
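Editorial note: the reworded E0393 diagnostic above fires when a defaulted type parameter mentions `Self` and the path is an object type, where `Self` is erased and the default therefore cannot be applied. A small example of the rule (trait and method names are made up; `dyn` is modern syntax):

    // `Rhs = Self` cannot be defaulted for an object type, so `Box<dyn Merge>`
    // is rejected with E0393; naming the parameter explicitly works.
    trait Merge<Rhs = Self> {
        fn merge(&self, rhs: &Rhs) -> Rhs;
    }

    impl Merge for u32 {
        fn merge(&self, rhs: &u32) -> u32 { self + rhs }
    }

    fn combine(a: &dyn Merge<u32>, b: u32) -> u32 {
        a.merge(&b)
    }

    fn main() {
        assert_eq!(combine(&7u32, 35), 42);
    }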
+ self.convert_parenthesized_parameters(rscope, substs, data).1 + })] + } + }; - Some(p.clone()) - } - - if param_mode == PathParamMode::Optional && types_provided.is_empty() { - ty_param_defs - .iter() - .map(|p| self.ty_infer(default_type_parameter(p, self_ty), Some(&mut substs), - Some(TypeSpace), span)) - .collect() - } else { - types_provided - } - } + debug!("create_substs_for_ast_path(decl_generics={:?}, self_ty={:?}) -> {:?}", + decl_generics, self_ty, substs); - fn convert_angle_bracketed_parameters(&self, - rscope: &RegionScope, - span: Span, - decl_generics: &ty::Generics<'tcx>, - data: &hir::AngleBracketedParameterData) - -> (Substs<'tcx>, - Vec>, - Vec>) - { - let regions: Vec<_> = - data.lifetimes.iter() - .map(|l| ast_region_to_region(self.tcx(), l)) - .collect(); - - let region_substs = - self.create_region_substs(rscope, span, decl_generics, regions); - - let types: Vec<_> = - data.types.iter() - .enumerate() - .map(|(i,t)| self.ast_ty_arg_to_ty(rscope, decl_generics, - i, ®ion_substs, t)) - .collect(); - - let assoc_bindings: Vec<_> = - data.bindings.iter() - .map(|b| ConvertedBinding { item_name: b.name, - ty: self.ast_ty_to_ty(rscope, &b.ty), - span: b.span }) - .collect(); - - (region_substs, types, assoc_bindings) + (substs, assoc_bindings) } /// Returns the appropriate lifetime to use for any output lifetimes @@ -628,7 +591,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { } if lifetimes_for_params.iter().map(|e| e.lifetime_count).sum::() == 1 { - Ok(possible_implied_output_region.unwrap()) + Ok(*possible_implied_output_region.unwrap()) } else { Err(Some(lifetimes_for_params)) } @@ -657,29 +620,18 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { fn convert_parenthesized_parameters(&self, rscope: &RegionScope, - span: Span, - decl_generics: &ty::Generics<'tcx>, + region_substs: &Substs<'tcx>, data: &hir::ParenthesizedParameterData) - -> (Substs<'tcx>, - Vec>, - Vec>) + -> (Ty<'tcx>, ConvertedBinding<'tcx>) { - let region_substs = - self.create_region_substs(rscope, span, decl_generics, Vec::new()); - let anon_scope = rscope.anon_type_scope(); let binding_rscope = MaybeWithAnonTypes::new(BindingRscope::new(), anon_scope); - let inputs = - data.inputs.iter() - .map(|a_t| self.ast_ty_arg_to_ty(&binding_rscope, decl_generics, - 0, ®ion_substs, a_t)) - .collect::>>(); - + let inputs: Vec<_> = data.inputs.iter().map(|a_t| { + self.ast_ty_arg_to_ty(&binding_rscope, None, region_substs, a_t) + }).collect(); let input_params = vec![String::new(); inputs.len()]; let implied_output_region = self.find_implied_output_region(&inputs, input_params); - let input_ty = self.tcx().mk_tup(inputs); - let (output, output_span) = match data.output { Some(ref output_ty) => { (self.convert_ty_with_lifetime_elision(implied_output_region, @@ -698,13 +650,13 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { span: output_span }; - (region_substs, vec![input_ty], vec![output_binding]) + (self.tcx().mk_tup(inputs), output_binding) } pub fn instantiate_poly_trait_ref(&self, rscope: &RegionScope, ast_trait_ref: &hir::PolyTraitRef, - self_ty: Option>, + self_ty: Ty<'tcx>, poly_projections: &mut Vec>) -> ty::PolyTraitRef<'tcx> { @@ -729,7 +681,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { pub fn instantiate_mono_trait_ref(&self, rscope: &RegionScope, trait_ref: &hir::TraitRef, - self_ty: Option>) + self_ty: Ty<'tcx>) -> ty::TraitRef<'tcx> { let trait_def_id = self.trait_def_id(trait_ref); @@ -755,32 +707,12 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { } } - fn 
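Editorial note: `convert_parenthesized_parameters` now returns the tuple of inputs plus the `Output` associated-type binding, because `Fn(A, B) -> C` is sugar for a single tuple parameter with an `Output = C` binding. A short illustration of the desugared meaning at the surface level:

    // `Fn(u8, u8) -> u16` is shorthand for `Fn<(u8, u8), Output = u16>`
    // (the explicit angle-bracket form is nightly-only, so it is only named
    // here in a comment).
    fn apply<F: Fn(u8, u8) -> u16>(f: F) -> u16 {
        f(40, 2)
    }

    fn main() {
        assert_eq!(apply(|a, b| a as u16 + b as u16), 42);
    }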
object_path_to_poly_trait_ref(&self, - rscope: &RegionScope, - span: Span, - param_mode: PathParamMode, - trait_def_id: DefId, - trait_path_ref_id: ast::NodeId, - trait_segment: &hir::PathSegment, - mut projections: &mut Vec>) - -> ty::PolyTraitRef<'tcx> - { - self.ast_path_to_poly_trait_ref(rscope, - span, - param_mode, - trait_def_id, - None, - trait_path_ref_id, - trait_segment, - projections) - } - fn ast_path_to_poly_trait_ref(&self, rscope: &RegionScope, span: Span, param_mode: PathParamMode, trait_def_id: DefId, - self_ty: Option>, + self_ty: Ty<'tcx>, path_id: ast::NodeId, trait_segment: &hir::PathSegment, poly_projections: &mut Vec>) @@ -803,21 +735,14 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { trait_segment); let poly_trait_ref = ty::Binder(ty::TraitRef::new(trait_def_id, substs)); - { - let converted_bindings = - assoc_bindings - .iter() - .filter_map(|binding| { - // specify type to assert that error was already reported in Err case: - let predicate: Result<_, ErrorReported> = - self.ast_type_binding_to_poly_projection_predicate(path_id, - poly_trait_ref.clone(), - self_ty, - binding); - predicate.ok() // ok to ignore Err() because ErrorReported (see above) - }); - poly_projections.extend(converted_bindings); - } + poly_projections.extend(assoc_bindings.iter().filter_map(|binding| { + // specify type to assert that error was already reported in Err case: + let predicate: Result<_, ErrorReported> = + self.ast_type_binding_to_poly_projection_predicate(path_id, + poly_trait_ref, + binding); + predicate.ok() // ok to ignore Err() because ErrorReported (see above) + })); debug!("ast_path_to_poly_trait_ref(trait_segment={:?}, projections={:?}) -> {:?}", trait_segment, poly_projections, poly_trait_ref); @@ -829,7 +754,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { span: Span, param_mode: PathParamMode, trait_def_id: DefId, - self_ty: Option>, + self_ty: Ty<'tcx>, trait_segment: &hir::PathSegment) -> ty::TraitRef<'tcx> { @@ -849,7 +774,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { span: Span, param_mode: PathParamMode, trait_def_id: DefId, - self_ty: Option>, + self_ty: Ty<'tcx>, trait_segment: &hir::PathSegment) -> (&'tcx Substs<'tcx>, Vec>) { @@ -865,50 +790,43 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { } }; - let (regions, types, assoc_bindings) = match trait_segment.parameters { - hir::AngleBracketedParameters(ref data) => { + match trait_segment.parameters { + hir::AngleBracketedParameters(_) => { // For now, require that parenthetical notation be used // only with `Fn()` etc. if !self.tcx().sess.features.borrow().unboxed_closures && trait_def.paren_sugar { - emit_feature_err(&self.tcx().sess.parse_sess.span_diagnostic, + emit_feature_err(&self.tcx().sess.parse_sess, "unboxed_closures", span, GateIssue::Language, "\ the precise format of `Fn`-family traits' \ type parameters is subject to change. \ Use parenthetical notation (Fn(Foo, Bar) -> Baz) instead"); } - - self.convert_angle_bracketed_parameters(rscope, span, &trait_def.generics, data) } - hir::ParenthesizedParameters(ref data) => { + hir::ParenthesizedParameters(_) => { // For now, require that parenthetical notation be used // only with `Fn()` etc. 
if !self.tcx().sess.features.borrow().unboxed_closures && !trait_def.paren_sugar { - emit_feature_err(&self.tcx().sess.parse_sess.span_diagnostic, + emit_feature_err(&self.tcx().sess.parse_sess, "unboxed_closures", span, GateIssue::Language, "\ parenthetical notation is only stable when used with `Fn`-family traits"); } - - self.convert_parenthesized_parameters(rscope, span, &trait_def.generics, data) } - }; - - let substs = self.create_substs_for_ast_path(span, - param_mode, - &trait_def.generics, - self_ty, - types, - regions); + } - (self.tcx().mk_substs(substs), assoc_bindings) + self.create_substs_for_ast_path(rscope, + span, + param_mode, + trait_def_id, + &trait_segment.parameters, + Some(self_ty)) } fn ast_type_binding_to_poly_projection_predicate( &self, path_id: ast::NodeId, - mut trait_ref: ty::PolyTraitRef<'tcx>, - self_ty: Option>, + trait_ref: ty::PolyTraitRef<'tcx>, binding: &ConvertedBinding<'tcx>) -> Result, ErrorReported> { @@ -962,62 +880,39 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { // Simple case: X is defined in the current trait. if self.trait_defines_associated_type_named(trait_ref.def_id(), binding.item_name) { - return Ok(ty::Binder(ty::ProjectionPredicate { // <-------------------+ - projection_ty: ty::ProjectionTy { // | - trait_ref: trait_ref.skip_binder().clone(), // Binder moved here --+ - item_name: binding.item_name, - }, - ty: binding.ty, + return Ok(trait_ref.map_bound(|trait_ref| { + ty::ProjectionPredicate { + projection_ty: ty::ProjectionTy { + trait_ref: trait_ref, + item_name: binding.item_name, + }, + ty: binding.ty, + } })); } // Otherwise, we have to walk through the supertraits to find - // those that do. This is complicated by the fact that, for an - // object type, the `Self` type is not present in the - // substitutions (after all, it's being constructed right now), - // but the `supertraits` iterator really wants one. To handle - // this, we currently insert a dummy type and then remove it - // later. Yuck. - - let dummy_self_ty = tcx.mk_infer(ty::FreshTy(0)); - if self_ty.is_none() { // if converting for an object type - let mut dummy_substs = trait_ref.skip_binder().substs.clone(); // binder moved here -+ - assert!(dummy_substs.self_ty().is_none()); // | - dummy_substs.types.push(SelfSpace, dummy_self_ty); // | - trait_ref = ty::Binder(ty::TraitRef::new(trait_ref.def_id(), // <------------+ - tcx.mk_substs(dummy_substs))); - } - + // those that do. self.ensure_super_predicates(binding.span, trait_ref.def_id())?; - let mut candidates: Vec = + let candidates: Vec = traits::supertraits(tcx, trait_ref.clone()) .filter(|r| self.trait_defines_associated_type_named(r.def_id(), binding.item_name)) .collect(); - // If converting for an object type, then remove the dummy-ty from `Self` now. - // Yuckety yuck. 
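Editorial note: `ast_type_binding_to_poly_projection_predicate` turns a binding such as `Item = u32` into a projection predicate `<I as Iterator>::Item == u32`; the "simple case" handled first is when the bound trait itself defines the associated type. A minimal example of that simple case:

    // The `Item = u32` binding becomes a projection predicate on `I`.
    fn total<I>(iter: I) -> u32
    where
        I: Iterator<Item = u32>,
    {
        iter.sum()
    }

    fn main() {
        assert_eq!(total(vec![1u32, 20, 21].into_iter()), 42);
    }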
- if self_ty.is_none() { - for candidate in &mut candidates { - let mut dummy_substs = candidate.0.substs.clone(); - assert!(dummy_substs.self_ty() == Some(dummy_self_ty)); - dummy_substs.types.pop(SelfSpace); - *candidate = ty::Binder(ty::TraitRef::new(candidate.def_id(), - tcx.mk_substs(dummy_substs))); - } - } - let candidate = self.one_bound_for_assoc_type(candidates, &trait_ref.to_string(), &binding.item_name.as_str(), binding.span)?; - Ok(ty::Binder(ty::ProjectionPredicate { // <-------------------------+ - projection_ty: ty::ProjectionTy { // | - trait_ref: candidate.skip_binder().clone(), // binder is moved up here --+ - item_name: binding.item_name, - }, - ty: binding.ty, + Ok(candidate.map_bound(|trait_ref| { + ty::ProjectionPredicate { + projection_ty: ty::ProjectionTy { + trait_ref: trait_ref, + item_name: binding.item_name, + }, + ty: binding.ty, + } })) } @@ -1030,10 +925,8 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { -> Ty<'tcx> { let tcx = self.tcx(); - let (generics, decl_ty) = match self.get_item_type_scheme(span, did) { - Ok(ty::TypeScheme { generics, ty: decl_ty }) => { - (generics, decl_ty) - } + let decl_ty = match self.get_item_type_scheme(span, did) { + Ok(type_scheme) => type_scheme.ty, Err(ErrorReported) => { return tcx.types.err; } @@ -1042,23 +935,24 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { let substs = self.ast_path_substs_for_ty(rscope, span, param_mode, - &generics, + did, item_segment); // FIXME(#12938): This is a hack until we have full support for DST. if Some(did) == self.tcx().lang_items.owned_box() { - assert_eq!(substs.types.len(TypeSpace), 1); - return self.tcx().mk_box(*substs.types.get(TypeSpace, 0)); + assert_eq!(substs.types().count(), 1); + return self.tcx().mk_box(substs.type_at(0)); } - decl_ty.subst(self.tcx(), &substs) + decl_ty.subst(self.tcx(), substs) } - fn ast_ty_to_trait_ref(&self, - rscope: &RegionScope, - ty: &hir::Ty, - bounds: &[hir::TyParamBound]) - -> Result, ErrorReported> + fn ast_ty_to_object_trait_ref(&self, + rscope: &RegionScope, + span: Span, + ty: &hir::Ty, + bounds: &[hir::TyParamBound]) + -> Ty<'tcx> { /*! * In a type like `Foo + Send`, we want to wait to collect the @@ -1071,33 +965,32 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { * name, and reports an error otherwise. 
*/ + let tcx = self.tcx(); match ty.node { hir::TyPath(None, ref path) => { - let resolution = self.tcx().expect_resolution(ty.id); + let resolution = tcx.expect_resolution(ty.id); match resolution.base_def { Def::Trait(trait_def_id) if resolution.depth == 0 => { - let mut projection_bounds = Vec::new(); - let trait_ref = - self.object_path_to_poly_trait_ref(rscope, - path.span, - PathParamMode::Explicit, - trait_def_id, - ty.id, - path.segments.last().unwrap(), - &mut projection_bounds); - Ok((trait_ref, projection_bounds)) + self.trait_path_to_object_type(rscope, + path.span, + PathParamMode::Explicit, + trait_def_id, + ty.id, + path.segments.last().unwrap(), + span, + partition_bounds(tcx, span, bounds)) } _ => { - struct_span_err!(self.tcx().sess, ty.span, E0172, + struct_span_err!(tcx.sess, ty.span, E0172, "expected a reference to a trait") .span_label(ty.span, &format!("expected a trait")) .emit(); - Err(ErrorReported) + tcx.types.err } } } _ => { - let mut err = struct_span_err!(self.tcx().sess, ty.span, E0178, + let mut err = struct_span_err!(tcx.sess, ty.span, E0178, "expected a path on the left-hand side \ of `+`, not `{}`", pprust::ty_to_string(ty)); @@ -1136,44 +1029,93 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { } } err.emit(); - Err(ErrorReported) + tcx.types.err } } } - fn trait_ref_to_object_type(&self, - rscope: &RegionScope, - span: Span, - trait_ref: ty::PolyTraitRef<'tcx>, - projection_bounds: Vec>, - bounds: &[hir::TyParamBound]) - -> Ty<'tcx> - { - let existential_bounds = self.conv_existential_bounds(rscope, - span, - trait_ref.clone(), - projection_bounds, - bounds); - - let result = self.make_object_type(span, trait_ref, existential_bounds); - debug!("trait_ref_to_object_type: result={:?}", - result); - - result + /// Transform a PolyTraitRef into a PolyExistentialTraitRef by + /// removing the dummy Self type (TRAIT_OBJECT_DUMMY_SELF). + fn trait_ref_to_existential(&self, trait_ref: ty::TraitRef<'tcx>) + -> ty::ExistentialTraitRef<'tcx> { + assert_eq!(trait_ref.self_ty().sty, TRAIT_OBJECT_DUMMY_SELF); + ty::ExistentialTraitRef::erase_self_ty(self.tcx(), trait_ref) } - fn make_object_type(&self, - span: Span, - principal: ty::PolyTraitRef<'tcx>, - bounds: ty::ExistentialBounds<'tcx>) - -> Ty<'tcx> { + fn trait_path_to_object_type(&self, + rscope: &RegionScope, + path_span: Span, + param_mode: PathParamMode, + trait_def_id: DefId, + trait_path_ref_id: ast::NodeId, + trait_segment: &hir::PathSegment, + span: Span, + partitioned_bounds: PartitionedBounds) + -> Ty<'tcx> { let tcx = self.tcx(); - let object = ty::TraitTy { - principal: principal, - bounds: bounds + + let mut projection_bounds = vec![]; + let dummy_self = tcx.mk_ty(TRAIT_OBJECT_DUMMY_SELF); + let principal = self.ast_path_to_poly_trait_ref(rscope, + path_span, + param_mode, + trait_def_id, + dummy_self, + trait_path_ref_id, + trait_segment, + &mut projection_bounds); + + let PartitionedBounds { builtin_bounds, + trait_bounds, + region_bounds } = + partitioned_bounds; + + if !trait_bounds.is_empty() { + let b = &trait_bounds[0]; + let span = b.trait_ref.path.span; + struct_span_err!(self.tcx().sess, span, E0225, + "only the builtin traits can be used as closure or object bounds") + .span_label(span, &format!("non-builtin trait used as bounds")) + .emit(); + } + + // Erase the dummy_self (TRAIT_OBJECT_DUMMY_SELF) used above. 
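Editorial note: `ast_ty_to_object_trait_ref` handles the `Trait + Bound` object-sum form, where one principal (non-builtin) trait is combined with builtin/auto bounds such as `Send`; a non-trait on the left-hand side is rejected (E0172/E0178). A small example of the accepted form, written with the modern `dyn` keyword this patch predates:

    use std::fmt::Debug;

    // One principal trait (`Debug`) plus a builtin bound (`Send`).
    fn show(items: Vec<Box<dyn Debug + Send>>) {
        for item in &items {
            println!("{:?}", item);
        }
    }

    fn main() {
        let items: Vec<Box<dyn Debug + Send>> = vec![Box::new(1u32), Box::new("hi")];
        show(items);
    }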
+ let existential_principal = principal.map_bound(|trait_ref| { + self.trait_ref_to_existential(trait_ref) + }); + let existential_projections = projection_bounds.iter().map(|bound| { + bound.map_bound(|b| { + let p = b.projection_ty; + ty::ExistentialProjection { + trait_ref: self.trait_ref_to_existential(p.trait_ref), + item_name: p.item_name, + ty: b.ty + } + }) + }).collect(); + + let region_bound = + self.compute_object_lifetime_bound(span, + ®ion_bounds, + existential_principal, + builtin_bounds); + + let region_bound = match region_bound { + Some(r) => r, + None => { + tcx.mk_region(match rscope.object_lifetime_default(span) { + Some(r) => r, + None => { + span_err!(self.tcx().sess, span, E0228, + "the lifetime bound for this object type cannot be deduced \ + from context; please supply an explicit bound"); + ty::ReStatic + } + }) + } }; - let object_trait_ref = - object.principal_trait_ref_with_self_ty(tcx, tcx.types.err); + + debug!("region_bound: {:?}", region_bound); // ensure the super predicates and stop if we encountered an error if self.ensure_super_predicates(span, principal.def_id()).is_err() { @@ -1187,23 +1129,30 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { tcx.astconv_object_safety_violations(principal.def_id()); if !object_safety_violations.is_empty() { tcx.report_object_safety_error( - span, principal.def_id(), None, object_safety_violations) - .unwrap().emit(); + span, principal.def_id(), object_safety_violations) + .emit(); return tcx.types.err; } - let mut associated_types: FnvHashSet<(DefId, ast::Name)> = - traits::supertraits(tcx, object_trait_ref) - .flat_map(|tr| { - let trait_def = tcx.lookup_trait_def(tr.def_id()); - trait_def.associated_type_names - .clone() - .into_iter() - .map(move |associated_type_name| (tr.def_id(), associated_type_name)) - }) - .collect(); + let mut associated_types = FnvHashSet::default(); + for tr in traits::supertraits(tcx, principal) { + if let Some(trait_id) = tcx.map.as_local_node_id(tr.def_id()) { + use collect::trait_associated_type_names; + + associated_types.extend(trait_associated_type_names(tcx, trait_id) + .map(|name| (tr.def_id(), name))) + } else { + let trait_items = tcx.impl_or_trait_items(tr.def_id()); + associated_types.extend(trait_items.iter().filter_map(|&def_id| { + match tcx.impl_or_trait_item(def_id) { + ty::TypeTraitItem(ref item) => Some(item.name), + _ => None + } + }).map(|name| (tr.def_id(), name))); + } + } - for projection_bound in &object.bounds.projection_bounds { + for projection_bound in &projection_bounds { let pair = (projection_bound.0.projection_ty.trait_ref.def_id, projection_bound.0.projection_ty.item_name); associated_types.remove(&pair); @@ -1219,7 +1168,14 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { .emit(); } - tcx.mk_trait(object.principal, object.bounds) + let ty = tcx.mk_trait(ty::TraitObject { + principal: existential_principal, + region_bound: region_bound, + builtin_bounds: builtin_bounds, + projection_bounds: existential_projections + }); + debug!("trait_object_type: {:?}", ty); + ty } fn report_ambiguous_associated_type(&self, @@ -1335,23 +1291,17 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { // Find the type of the associated item, and the trait where the associated // item is declared. let bound = match (&ty.sty, ty_path_def) { - (_, Def::SelfTy(Some(trait_did), Some(impl_id))) => { - // For Def::SelfTy() values inlined from another crate, the - // impl_id will be DUMMY_NODE_ID, which would cause problems - // here. 
But we should never run into an impl from another crate - // in this pass. - assert!(impl_id != ast::DUMMY_NODE_ID); - + (_, Def::SelfTy(Some(_), Some(impl_def_id))) => { // `Self` in an impl of a trait - we have a concrete self type and a // trait reference. - let trait_ref = tcx.impl_trait_ref(tcx.map.local_def_id(impl_id)).unwrap(); + let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap(); let trait_ref = if let Some(free_substs) = self.get_free_substs() { trait_ref.subst(tcx, free_substs) } else { trait_ref }; - if self.ensure_super_predicates(span, trait_did).is_err() { + if self.ensure_super_predicates(span, trait_ref.def_id).is_err() { return (tcx.types.err, Def::Err); } @@ -1379,8 +1329,9 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { Err(ErrorReported) => return (tcx.types.err, Def::Err), } } - (&ty::TyParam(_), Def::TyParam(_, _, param_did, param_name)) => { + (&ty::TyParam(_), Def::TyParam(param_did)) => { let param_node_id = tcx.map.as_local_node_id(param_did).unwrap(); + let param_name = tcx.type_parameter_def(param_node_id).name; match self.find_bound_for_assoc_item(param_node_id, param_name, assoc_name, @@ -1390,10 +1341,13 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { } } _ => { - self.report_ambiguous_associated_type(span, - &ty.to_string(), - "Trait", - &assoc_name.as_str()); + // Don't print TyErr to the user. + if !ty.references_error() { + self.report_ambiguous_associated_type(span, + &ty.to_string(), + "Trait", + &assoc_name.as_str()); + } return (tcx.types.err, Def::Err); } }; @@ -1405,7 +1359,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { // `ty::trait_items` used below requires information generated // by type collection, which may be in progress at this point. match tcx.map.expect_item(trait_id).node { - hir::ItemTrait(_, _, _, ref trait_items) => { + hir::ItemTrait(.., ref trait_items) => { let item = trait_items.iter() .find(|i| i.name == assoc_name) .expect("missing associated type"); @@ -1419,7 +1373,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { item.expect("missing associated type").def_id() }; - (ty, Def::AssociatedTy(trait_did, item_did)) + (ty, Def::AssociatedTy(item_did)) } fn qpath_to_ty(&self, @@ -1453,7 +1407,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { span, param_mode, trait_def_id, - Some(self_ty), + self_ty, trait_segment); debug!("qpath_to_ty: trait_ref={:?}", trait_ref); @@ -1468,24 +1422,20 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { /// # Parameters /// /// * `this`, `rscope`: the surrounding context - /// * `decl_generics`: the generics of the struct/enum/trait declaration being - /// referenced - /// * `index`: the index of the type parameter being instantiated from the list - /// (we assume it is in the `TypeSpace`) + /// * `def`: the type parameter being instantiated (if available) /// * `region_substs`: a partial substitution consisting of /// only the region type parameters being supplied to this type. 
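Editorial note: the supertrait walk above collects every associated-type name reachable from the principal trait and then removes the ones pinned down by projection bindings; whatever is left over is reported as unspecified. At the surface this is the rule that an object type must bind all of its associated types, as in this sketch:

    // `dyn Iterator` alone would be rejected because `Item` is unbound;
    // binding it makes the object type well-formed.
    fn first(it: &mut dyn Iterator<Item = char>) -> Option<char> {
        it.next()
    }

    fn main() {
        let mut chars = "rust".chars();
        assert_eq!(first(&mut chars), Some('r'));
    }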
/// * `ast_ty`: the ast representation of the type being supplied - pub fn ast_ty_arg_to_ty(&self, - rscope: &RegionScope, - decl_generics: &ty::Generics<'tcx>, - index: usize, - region_substs: &Substs<'tcx>, - ast_ty: &hir::Ty) - -> Ty<'tcx> + fn ast_ty_arg_to_ty(&self, + rscope: &RegionScope, + def: Option<&ty::TypeParameterDef<'tcx>>, + region_substs: &Substs<'tcx>, + ast_ty: &hir::Ty) + -> Ty<'tcx> { let tcx = self.tcx(); - if let Some(def) = decl_generics.types.opt_get(TypeSpace, index) { + if let Some(def) = def { let object_lifetime_default = def.object_lifetime_default.subst(tcx, region_substs); let rscope1 = &ObjectLifetimeDefaultRscope::new(rscope, object_lifetime_default); self.ast_ty_to_ty(rscope1, ast_ty) @@ -1515,25 +1465,19 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { Def::Trait(trait_def_id) => { // N.B. this case overlaps somewhat with // TyObjectSum, see that fn for details - let mut projection_bounds = Vec::new(); - - let trait_ref = - self.object_path_to_poly_trait_ref(rscope, - span, - param_mode, - trait_def_id, - base_path_ref_id, - base_segments.last().unwrap(), - &mut projection_bounds); tcx.prohibit_type_params(base_segments.split_last().unwrap().1); - self.trait_ref_to_object_type(rscope, - span, - trait_ref, - projection_bounds, - &[]) + + self.trait_path_to_object_type(rscope, + span, + param_mode, + trait_def_id, + base_path_ref_id, + base_segments.last().unwrap(), + span, + partition_bounds(tcx, span, &[])) } - Def::Enum(did) | Def::TyAlias(did) | Def::Struct(did) => { + Def::Enum(did) | Def::TyAlias(did) | Def::Struct(did) | Def::Union(did) => { tcx.prohibit_type_params(base_segments.split_last().unwrap().1); self.ast_path_to_ty(rscope, span, @@ -1541,20 +1485,31 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { did, base_segments.last().unwrap()) } - Def::TyParam(space, index, _, name) => { + Def::TyParam(did) => { tcx.prohibit_type_params(base_segments); - tcx.mk_param(space, index, name) + + let node_id = tcx.map.as_local_node_id(did).unwrap(); + let param = tcx.ty_param_defs.borrow().get(&node_id) + .map(ty::ParamTy::for_def); + if let Some(p) = param { + p.to_ty(tcx) + } else { + // Only while computing defaults of earlier type + // parameters can a type parameter be missing its def. + struct_span_err!(tcx.sess, span, E0128, + "type parameters with a default cannot use \ + forward declared identifiers") + .span_label(span, &format!("defaulted type parameters \ + cannot be forward declared")) + .emit(); + tcx.types.err + } } - Def::SelfTy(_, Some(impl_id)) => { + Def::SelfTy(_, Some(def_id)) => { // Self in impl (we know the concrete type). - // For Def::SelfTy() values inlined from another crate, the - // impl_id will be DUMMY_NODE_ID, which would cause problems - // here. But we should never run into an impl from another crate - // in this pass. 
- assert!(impl_id != ast::DUMMY_NODE_ID); - tcx.prohibit_type_params(base_segments); + let impl_id = tcx.map.as_local_node_id(def_id).unwrap(); let ty = tcx.node_id_to_type(impl_id); if let Some(free_substs) = self.get_free_substs() { ty.subst(tcx, free_substs) @@ -1567,8 +1522,9 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { tcx.prohibit_type_params(base_segments); tcx.mk_self_type() } - Def::AssociatedTy(trait_did, _) => { + Def::AssociatedTy(def_id) => { tcx.prohibit_type_params(&base_segments[..base_segments.len()-2]); + let trait_did = tcx.parent_def_id(def_id).unwrap(); self.qpath_to_ty(rscope, span, param_mode, @@ -1671,18 +1627,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { tcx.mk_slice(self.ast_ty_to_ty(rscope, &ty)) } hir::TyObjectSum(ref ty, ref bounds) => { - match self.ast_ty_to_trait_ref(rscope, &ty, bounds) { - Ok((trait_ref, projection_bounds)) => { - self.trait_ref_to_object_type(rscope, - ast_ty.span, - trait_ref, - projection_bounds, - bounds) - } - Err(ErrorReported) => { - self.tcx().types.err - } - } + self.ast_ty_to_object_trait_ref(rscope, ast_ty.span, ty, bounds) } hir::TyPtr(ref mt) => { tcx.mk_ptr(ty::TypeAndMut { @@ -1698,7 +1643,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { rscope, ty::ObjectLifetimeDefault::Specific(r)); let t = self.ast_ty_to_ty(rscope1, &mt.ty); - tcx.mk_ref(tcx.mk_region(r), ty::TypeAndMut {ty: t, mutbl: mt.mutbl}) + tcx.mk_ref(r, ty::TypeAndMut {ty: t, mutbl: mt.mutbl}) } hir::TyNever => { tcx.types.never @@ -1759,7 +1704,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { tcx.mk_fn_ptr(bare_fn_ty) } hir::TyPolyTraitRef(ref bounds) => { - self.conv_ty_poly_trait_ref(rscope, ast_ty.span, bounds) + self.conv_object_ty_poly_trait_ref(rscope, ast_ty.span, bounds) } hir::TyImplTrait(ref bounds) => { use collect::{compute_bounds, SizedByDefault}; @@ -1767,7 +1712,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { // Create the anonymized type. let def_id = tcx.map.local_def_id(ast_ty.id); if let Some(anon_scope) = rscope.anon_type_scope() { - let substs = anon_scope.fresh_substs(tcx); + let substs = anon_scope.fresh_substs(self, ast_ty.span); let ty = tcx.mk_anon(tcx.map.local_def_id(ast_ty.id), substs); // Collect the bounds, i.e. the `A+B+'c` in `impl A+B+'c`. @@ -1778,7 +1723,8 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { let predicates = bounds.predicates(tcx, ty); let predicates = tcx.lift_to_global(&predicates).unwrap(); tcx.predicates.borrow_mut().insert(def_id, ty::GenericPredicates { - predicates: VecPerParamSpace::new(vec![], vec![], predicates) + parent: None, + predicates: predicates }); ty @@ -1820,8 +1766,11 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { } } hir::TyTypeof(ref _e) => { - span_err!(tcx.sess, ast_ty.span, E0516, - "`typeof` is a reserved keyword but unimplemented"); + struct_span_err!(tcx.sess, ast_ty.span, E0516, + "`typeof` is a reserved keyword but unimplemented") + .span_label(ast_ty.span, &format!("reserved keyword")) + .emit(); + tcx.types.err } hir::TyInfer => { @@ -1829,7 +1778,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { // values in a ExprClosure, or as // the type of local variables. Both of these cases are // handled specially and will not descend into this routine. 
- self.ty_infer(None, None, None, ast_ty.span) + self.ty_infer(ast_ty.span) } }; @@ -1846,7 +1795,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { { match a.ty.node { hir::TyInfer if expected_ty.is_some() => expected_ty.unwrap(), - hir::TyInfer => self.ty_infer(None, None, None, a.ty.span), + hir::TyInfer => self.ty_infer(a.ty.span), _ => self.ast_ty_to_ty(rscope, &a.ty), } } @@ -1855,7 +1804,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { sig: &hir::MethodSig, untransformed_self_ty: Ty<'tcx>, anon_scope: Option) - -> (&'tcx ty::BareFnTy<'tcx>, ty::ExplicitSelfCategory) { + -> (&'tcx ty::BareFnTy<'tcx>, ty::ExplicitSelfCategory<'tcx>) { self.ty_of_method_or_bare_fn(sig.unsafety, sig.abi, Some(untransformed_self_ty), @@ -1880,7 +1829,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { decl: &hir::FnDecl, arg_anon_scope: Option, ret_anon_scope: Option) - -> (&'tcx ty::BareFnTy<'tcx>, ty::ExplicitSelfCategory) + -> (&'tcx ty::BareFnTy<'tcx>, ty::ExplicitSelfCategory<'tcx>) { debug!("ty_of_method_or_bare_fn"); @@ -1917,7 +1866,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { // reference) in the arguments, then any anonymous regions in the output // have that lifetime. let implied_output_region = match explicit_self_category { - ty::ExplicitSelfCategory::ByReference(region, _) => Ok(region), + ty::ExplicitSelfCategory::ByReference(region, _) => Ok(*region), _ => self.find_implied_output_region(&arg_tys, arg_pats) }; @@ -1929,11 +1878,16 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { hir::DefaultReturn(..) => self.tcx().mk_nil(), }; + let input_tys = self_ty.into_iter().chain(arg_tys).collect(); + + debug!("ty_of_method_or_bare_fn: input_tys={:?}", input_tys); + debug!("ty_of_method_or_bare_fn: output_ty={:?}", output_ty); + (self.tcx().mk_bare_fn(ty::BareFnTy { unsafety: unsafety, abi: abi, sig: ty::Binder(ty::FnSig { - inputs: self_ty.into_iter().chain(arg_tys).collect(), + inputs: input_tys, output: output_ty, variadic: decl.variadic }), @@ -1944,7 +1898,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { rscope: &RegionScope, untransformed_self_ty: Ty<'tcx>, explicit_self: &hir::ExplicitSelf) - -> (Ty<'tcx>, ty::ExplicitSelfCategory) + -> (Ty<'tcx>, ty::ExplicitSelfCategory<'tcx>) { return match explicit_self.node { SelfKind::Value(..) => { @@ -1956,8 +1910,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { rscope, explicit_self.span, lifetime); - (self.tcx().mk_ref( - self.tcx().mk_region(region), + (self.tcx().mk_ref(region, ty::TypeAndMut { ty: untransformed_self_ty, mutbl: mutability @@ -2011,7 +1964,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { ty::ExplicitSelfCategory::ByValue } else { match explicit_type.sty { - ty::TyRef(r, mt) => ty::ExplicitSelfCategory::ByReference(*r, mt.mutbl), + ty::TyRef(r, mt) => ty::ExplicitSelfCategory::ByReference(r, mt.mutbl), ty::TyBox(_) => ty::ExplicitSelfCategory::ByBox, _ => ty::ExplicitSelfCategory::ByValue, } @@ -2068,8 +2021,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { let output_ty = match decl.output { _ if is_infer && expected_ret_ty.is_some() => expected_ret_ty.unwrap(), - _ if is_infer => - self.ty_infer(None, None, None, decl.output.span()), + _ if is_infer => self.ty_infer(decl.output.span()), hir::Return(ref output) => self.ast_ty_to_ty(&rb, &output), hir::DefaultReturn(..) 
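Editorial note: the `implied_output_region` logic above implements the elision rule that a by-reference `self` donates its lifetime to any elided output lifetime. A short example showing the elided signature and the fully spelled-out signature it is read as (the struct is made up for illustration):

    struct Wrapper {
        name: String,
    }

    impl Wrapper {
        // Elided form...
        fn name(&self) -> &str {
            &self.name
        }
        // ...which the compiler treats like this explicit signature.
        fn name_explicit<'a>(&'a self) -> &'a str {
            &self.name
        }
    }

    fn main() {
        let w = Wrapper { name: "rustc".to_string() };
        assert_eq!(w.name(), w.name_explicit());
    }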
=> bug!(), @@ -2087,28 +2039,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { } } - /// Given an existential type like `Foo+'a+Bar`, this routine converts - /// the `'a` and `Bar` intos an `ExistentialBounds` struct. - /// The `main_trait_refs` argument specifies the `Foo` -- it is absent - /// for closures. Eventually this should all be normalized, I think, - /// so that there is no "main trait ref" and instead we just have a flat - /// list of bounds as the existential type. - fn conv_existential_bounds(&self, - rscope: &RegionScope, - span: Span, - principal_trait_ref: ty::PolyTraitRef<'tcx>, - projection_bounds: Vec>, - ast_bounds: &[hir::TyParamBound]) - -> ty::ExistentialBounds<'tcx> - { - let partitioned_bounds = - partition_bounds(self.tcx(), span, ast_bounds); - - self.conv_existential_bounds_from_partitioned_bounds( - rscope, span, principal_trait_ref, projection_bounds, partitioned_bounds) - } - - fn conv_ty_poly_trait_ref(&self, + fn conv_object_ty_poly_trait_ref(&self, rscope: &RegionScope, span: Span, ast_bounds: &[hir::TyParamBound]) @@ -2116,75 +2047,24 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { { let mut partitioned_bounds = partition_bounds(self.tcx(), span, &ast_bounds[..]); - let mut projection_bounds = Vec::new(); - let main_trait_bound = if !partitioned_bounds.trait_bounds.is_empty() { - let trait_bound = partitioned_bounds.trait_bounds.remove(0); - self.instantiate_poly_trait_ref(rscope, - trait_bound, - None, - &mut projection_bounds) + let trait_bound = if !partitioned_bounds.trait_bounds.is_empty() { + partitioned_bounds.trait_bounds.remove(0) } else { span_err!(self.tcx().sess, span, E0224, "at least one non-builtin trait is required for an object type"); return self.tcx().types.err; }; - let bounds = - self.conv_existential_bounds_from_partitioned_bounds(rscope, - span, - main_trait_bound.clone(), - projection_bounds, - partitioned_bounds); - - self.make_object_type(span, main_trait_bound, bounds) - } - - pub fn conv_existential_bounds_from_partitioned_bounds(&self, - rscope: &RegionScope, - span: Span, - principal_trait_ref: ty::PolyTraitRef<'tcx>, - projection_bounds: Vec>, // Empty for boxed closures - partitioned_bounds: PartitionedBounds) - -> ty::ExistentialBounds<'tcx> - { - let PartitionedBounds { builtin_bounds, - trait_bounds, - region_bounds } = - partitioned_bounds; - - if !trait_bounds.is_empty() { - let b = &trait_bounds[0]; - let span = b.trait_ref.path.span; - struct_span_err!(self.tcx().sess, span, E0225, - "only the builtin traits can be used as closure or object bounds") - .span_label(span, &format!("non-builtin trait used as bounds")) - .emit(); - } - - let region_bound = - self.compute_object_lifetime_bound(span, - ®ion_bounds, - principal_trait_ref, - builtin_bounds); - - let region_bound = match region_bound { - Some(r) => r, - None => { - match rscope.object_lifetime_default(span) { - Some(r) => r, - None => { - span_err!(self.tcx().sess, span, E0228, - "the lifetime bound for this object type cannot be deduced \ - from context; please supply an explicit bound"); - ty::ReStatic - } - } - } - }; - - debug!("region_bound: {:?}", region_bound); - - ty::ExistentialBounds::new(region_bound, builtin_bounds, projection_bounds) + let trait_ref = &trait_bound.trait_ref; + let trait_def_id = self.trait_def_id(trait_ref); + self.trait_path_to_object_type(rscope, + trait_ref.path.span, + PathParamMode::Explicit, + trait_def_id, + trait_ref.ref_id, + trait_ref.path.segments.last().unwrap(), + span, + partitioned_bounds) } /// Given 
the bounds on an object, determines what single region bound (if any) we can @@ -2195,9 +2075,9 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { fn compute_object_lifetime_bound(&self, span: Span, explicit_region_bounds: &[&hir::Lifetime], - principal_trait_ref: ty::PolyTraitRef<'tcx>, + principal_trait_ref: ty::PolyExistentialTraitRef<'tcx>, builtin_bounds: ty::BuiltinBounds) - -> Option // if None, use the default + -> Option<&'tcx ty::Region> // if None, use the default { let tcx = self.tcx(); @@ -2220,13 +2100,13 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { if let Err(ErrorReported) = self.ensure_super_predicates(span, principal_trait_ref.def_id()) { - return Some(ty::ReStatic); + return Some(tcx.mk_region(ty::ReStatic)); } // No explicit region bound specified. Therefore, examine trait // bounds and see if we can derive region bounds from those. let derived_region_bounds = - object_region_bounds(tcx, &principal_trait_ref, builtin_bounds); + object_region_bounds(tcx, principal_trait_ref, builtin_bounds); // If there are no derived region bounds, then report back that we // can find no region bound. The caller will use the default. @@ -2236,8 +2116,8 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { // If any of the derived region bounds are 'static, that is always // the best choice. - if derived_region_bounds.iter().any(|r| ty::ReStatic == *r) { - return Some(ty::ReStatic); + if derived_region_bounds.iter().any(|&r| ty::ReStatic == *r) { + return Some(tcx.mk_region(ty::ReStatic)); } // Determine whether there is exactly one unique region in the set @@ -2279,7 +2159,7 @@ pub fn partition_bounds<'a, 'b, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, let parameters = &segments[segments.len() - 1].parameters; if !parameters.types().is_empty() { check_type_argument_count(tcx, b.trait_ref.path.span, - parameters.types().len(), 0, 0); + parameters.types().len(), &[]); } if !parameters.lifetimes().is_empty() { report_lifetime_number_error(tcx, b.trait_ref.path.span, @@ -2310,7 +2190,9 @@ pub fn partition_bounds<'a, 'b, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, } fn check_type_argument_count(tcx: TyCtxt, span: Span, supplied: usize, - required: usize, accepted: usize) { + ty_param_defs: &[ty::TypeParameterDef]) { + let accepted = ty_param_defs.len(); + let required = ty_param_defs.iter().take_while(|x| x.default.is_none()) .count(); if supplied < required { let expected = if required < accepted { "expected at least" @@ -2367,7 +2249,7 @@ fn report_lifetime_number_error(tcx: TyCtxt, span: Span, number: usize, expected // and return from functions in multiple places. 
#[derive(PartialEq, Eq, Clone, Debug)] pub struct Bounds<'tcx> { - pub region_bounds: Vec, + pub region_bounds: Vec<&'tcx ty::Region>, pub builtin_bounds: ty::BuiltinBounds, pub trait_bounds: Vec>, pub projection_bounds: Vec>, @@ -2389,7 +2271,7 @@ impl<'a, 'gcx, 'tcx> Bounds<'tcx> { for ®ion_bound in &self.region_bounds { // account for the binder being introduced below; no need to shift `param_ty` // because, at present at least, it can only refer to early-bound regions - let region_bound = ty::fold::shift_region(region_bound, 1); + let region_bound = tcx.mk_region(ty::fold::shift_region(*region_bound, 1)); vec.push(ty::Binder(ty::OutlivesPredicate(param_ty, region_bound)).to_predicate()); } diff --git a/src/librustc_typeck/check/_match.rs b/src/librustc_typeck/check/_match.rs index c2fba80385..507de9a9e3 100644 --- a/src/librustc_typeck/check/_match.rs +++ b/src/librustc_typeck/check/_match.rs @@ -11,7 +11,6 @@ use hir::def::Def; use rustc::infer::{self, InferOk, TypeOrigin}; use hir::pat_util::EnumerateAndAdjustIterator; -use rustc::ty::subst::Substs; use rustc::ty::{self, Ty, TypeFoldable, LvaluePreference, VariantKind}; use check::{FnCtxt, Expectation}; use lint; @@ -33,17 +32,16 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { debug!("check_pat(pat={:?},expected={:?})", pat, expected); - match pat.node { + let ty = match pat.node { PatKind::Wild => { - self.write_ty(pat.id, expected); + expected } PatKind::Lit(ref lt) => { - self.check_expr(<); - let expr_ty = self.expr_ty(<); + let ty = self.check_expr(<); // Byte string patterns behave the same way as array patterns // They can denote both statically and dynamically sized byte arrays - let mut pat_ty = expr_ty; + let mut pat_ty = ty; if let hir::ExprLit(ref lt) = lt.node { if let ast::LitKind::ByteStr(_) = lt.node { let expected_ty = self.structurally_resolved_type(pat.span, expected); @@ -56,28 +54,24 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - self.write_ty(pat.id, pat_ty); - // somewhat surprising: in this case, the subtyping // relation goes the opposite way as the other // cases. Actually what we really want is not a subtyping // relation at all but rather that there exists a LUB (so // that they can be compared). However, in practice, // constants are always scalars or strings. For scalars - // subtyping is irrelevant, and for strings `expr_ty` is + // subtyping is irrelevant, and for strings `ty` is // type is `&'static str`, so if we say that // // &'static str <: expected // // that's equivalent to there existing a LUB. self.demand_suptype(pat.span, expected, pat_ty); + pat_ty } PatKind::Range(ref begin, ref end) => { - self.check_expr(begin); - self.check_expr(end); - - let lhs_ty = self.expr_ty(begin); - let rhs_ty = self.expr_ty(end); + let lhs_ty = self.check_expr(begin); + let rhs_ty = self.check_expr(end); // Check that both end-points are of numeric or char type. let numeric_or_char = |ty: Ty| ty.is_numeric() || ty.is_char(); @@ -106,11 +100,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // it to type the entire expression. let common_type = self.resolve_type_vars_if_possible(&lhs_ty); - self.write_ty(pat.id, common_type); - // subtyping doesn't matter here, as the value is some kind of scalar self.demand_eqtype(pat.span, expected, lhs_ty); self.demand_eqtype(pat.span, expected, rhs_ty); + common_type } PatKind::Binding(bm, _, ref sub) => { let typ = self.local_ty(pat.span, pat.id); @@ -122,7 +115,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // and T is the expected type. 
let region_var = self.next_region_var(infer::PatternRegion(pat.span)); let mt = ty::TypeAndMut { ty: expected, mutbl: mutbl }; - let region_ty = tcx.mk_ref(tcx.mk_region(region_var), mt); + let region_ty = tcx.mk_ref(region_var, mt); // `x` is assigned a value of type `&M T`, hence `&M T <: typeof(x)` is // required. However, we use equality, which is stronger. See (*) for @@ -137,13 +130,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - self.write_ty(pat.id, typ); - // if there are multiple arms, make sure they all agree on // what the type of the binding `x` ought to be match tcx.expect_def(pat.id) { Def::Err => {} - Def::Local(_, var_id) => { + Def::Local(def_id) => { + let var_id = tcx.map.as_local_node_id(def_id).unwrap(); if var_id != pat.id { let vt = self.local_ty(pat.span, var_id); self.demand_eqtype(pat.span, vt, typ); @@ -155,16 +147,18 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if let Some(ref p) = *sub { self.check_pat(&p, expected); } + + typ } PatKind::TupleStruct(ref path, ref subpats, ddpos) => { - self.check_pat_tuple_struct(pat, path, &subpats, ddpos, expected); + self.check_pat_tuple_struct(pat, path, &subpats, ddpos, expected) } PatKind::Path(ref opt_qself, ref path) => { let opt_qself_ty = opt_qself.as_ref().map(|qself| self.to_ty(&qself.ty)); - self.check_pat_path(pat, opt_qself_ty, path, expected); + self.check_pat_path(pat, opt_qself_ty, path, expected) } PatKind::Struct(ref path, ref fields, etc) => { - self.check_pat_struct(pat, path, fields, etc, expected); + self.check_pat_struct(pat, path, fields, etc, expected) } PatKind::Tuple(ref elements, ddpos) => { let mut expected_len = elements.len(); @@ -179,11 +173,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let element_tys: Vec<_> = (0 .. max_len).map(|_| self.next_ty_var()).collect(); let pat_ty = tcx.mk_tup(element_tys.clone()); - self.write_ty(pat.id, pat_ty); self.demand_eqtype(pat.span, expected, pat_ty); for (i, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) { self.check_pat(elem, &element_tys[i]); } + pat_ty } PatKind::Box(ref inner) => { let inner_ty = self.next_ty_var(); @@ -194,11 +188,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // think any errors can be introduced by using // `demand::eqtype`. 
self.demand_eqtype(pat.span, expected, uniq_ty); - self.write_ty(pat.id, uniq_ty); self.check_pat(&inner, inner_ty); + uniq_ty } else { - self.write_error(pat.id); self.check_pat(&inner, tcx.types.err); + tcx.types.err } } PatKind::Ref(ref inner, mutbl) => { @@ -220,17 +214,17 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let inner_ty = self.next_ty_var(); let mt = ty::TypeAndMut { ty: inner_ty, mutbl: mutbl }; let region = self.next_region_var(infer::PatternRegion(pat.span)); - let rptr_ty = tcx.mk_ref(tcx.mk_region(region), mt); + let rptr_ty = tcx.mk_ref(region, mt); self.demand_eqtype(pat.span, expected, rptr_ty); (rptr_ty, inner_ty) } }; - self.write_ty(pat.id, rptr_ty); self.check_pat(&inner, inner_ty); + rptr_ty } else { - self.write_error(pat.id); self.check_pat(&inner, tcx.types.err); + tcx.types.err } } PatKind::Vec(ref before, ref slice, ref after) => { @@ -240,17 +234,23 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let min_len = before.len() + after.len(); if slice.is_none() { if min_len != size { - span_err!(tcx.sess, pat.span, E0527, - "pattern requires {} elements but array has {}", - min_len, size); + struct_span_err!( + tcx.sess, pat.span, E0527, + "pattern requires {} elements but array has {}", + min_len, size) + .span_label(pat.span, &format!("expected {} elements",size)) + .emit(); } (inner_ty, tcx.types.err) } else if let Some(rest) = size.checked_sub(min_len) { (inner_ty, tcx.mk_array(inner_ty, rest)) } else { - span_err!(tcx.sess, pat.span, E0528, - "pattern requires at least {} elements but array has {}", - min_len, size); + struct_span_err!(tcx.sess, pat.span, E0528, + "pattern requires at least {} elements but array has {}", + min_len, size) + .span_label(pat.span, + &format!("pattern cannot match array of {} elements", size)) + .emit(); (inner_ty, tcx.types.err) } } @@ -270,14 +270,15 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { _ => {} } } - err.emit(); + + err.span_label( pat.span, + &format!("pattern cannot match with input type `{}`", expected_ty) + ).emit(); } (tcx.types.err, tcx.types.err) } }; - self.write_ty(pat.id, expected_ty); - for elt in before { self.check_pat(&elt, inner_ty); } @@ -287,8 +288,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { for elt in after { self.check_pat(&elt, inner_ty); } + expected_ty } - } + }; + + self.write_ty(pat.id, ty); // (*) In most of the cases above (literals and constants being // the exception), we relate types using strict equality, evewn @@ -347,9 +351,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if let ty::TyTrait(..) = mt.ty.sty { // This is "x = SomeTrait" being reduced from // "let &x = &SomeTrait" or "let box x = Box", an error. 
- span_err!(self.tcx.sess, span, E0033, - "type `{}` cannot be dereferenced", - self.ty_to_string(expected)); + let type_str = self.ty_to_string(expected); + struct_span_err!(self.tcx.sess, span, E0033, + "type `{}` cannot be dereferenced", type_str) + .span_label(span, &format!("type `{}` cannot be dereferenced", type_str)) + .emit(); return false } } @@ -364,7 +370,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { discrim: &'gcx hir::Expr, arms: &'gcx [hir::Arm], expected: Expectation<'tcx>, - match_src: hir::MatchSource) { + match_src: hir::MatchSource) -> Ty<'tcx> { let tcx = self.tcx; // Not entirely obvious: if matches may create ref bindings, we @@ -378,8 +384,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }); let discrim_ty; if let Some(m) = contains_ref_bindings { - self.check_expr_with_lvalue_pref(discrim, LvaluePreference::from_mutbl(m)); - discrim_ty = self.expr_ty(discrim); + discrim_ty = self.check_expr_with_lvalue_pref(discrim, LvaluePreference::from_mutbl(m)); } else { // ...but otherwise we want to use any supertype of the // discriminant. This is sort of a workaround, see note (*) in @@ -418,12 +423,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } _ => result_ty }; + for (i, arm) in arms.iter().enumerate() { if let Some(ref e) = arm.guard { self.check_expr_has_type(e, tcx.types.bool); } - self.check_expr_with_expectation(&arm.body, expected); - let arm_ty = self.expr_ty(&arm.body); + let arm_ty = self.check_expr_with_expectation(&arm.body, expected); if result_ty.references_error() || arm_ty.references_error() { result_ty = tcx.types.err; @@ -453,10 +458,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }) } else if i == 0 { // Special-case the first arm, as it has no "previous expressions". - self.try_coerce(&arm.body, coerce_first) + self.try_coerce(&arm.body, arm_ty, coerce_first) } else { let prev_arms = || arms[..i].iter().map(|arm| &*arm.body); - self.try_find_coercion_lub(origin, prev_arms, result_ty, &arm.body) + self.try_find_coercion_lub(origin, prev_arms, result_ty, &arm.body, arm_ty) }; result_ty = match result { @@ -473,7 +478,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }; } - self.write_ty(expr.id, result_ty); + result_ty } } @@ -483,43 +488,38 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { path: &hir::Path, fields: &'gcx [Spanned], etc: bool, - expected: Ty<'tcx>) + expected: Ty<'tcx>) -> Ty<'tcx> { // Resolve the path and check the definition for errors. let (variant, pat_ty) = if let Some(variant_ty) = self.check_struct_path(path, pat.id, pat.span) { variant_ty } else { - self.write_error(pat.id); for field in fields { self.check_pat(&field.node.pat, self.tcx.types.err); } - return; + return self.tcx.types.err; }; // Type check the path. self.demand_eqtype(pat.span, expected, pat_ty); // Type check subpatterns. - let substs = match pat_ty.sty { - ty::TyStruct(_, substs) | ty::TyEnum(_, substs) => substs, - _ => span_bug!(pat.span, "struct variant is not an ADT") - }; - self.check_struct_pat_fields(pat.span, fields, variant, substs, etc); + self.check_struct_pat_fields(pat_ty, pat.span, variant, fields, etc); + pat_ty } fn check_pat_path(&self, pat: &hir::Pat, opt_self_ty: Option>, path: &hir::Path, - expected: Ty<'tcx>) + expected: Ty<'tcx>) -> Ty<'tcx> { let tcx = self.tcx; let report_unexpected_def = || { span_err!(tcx.sess, pat.span, E0533, "`{}` does not name a unit variant, unit struct or a constant", pprust::path_to_string(path)); - self.write_error(pat.id); }; // Resolve the path and check the definition for errors. 
@@ -528,18 +528,17 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { match def { Def::Err => { self.set_tainted_by_errors(); - self.write_error(pat.id); - return; + return tcx.types.err; } Def::Method(..) => { report_unexpected_def(); - return; + return tcx.types.err; } Def::Variant(..) => { let variant = tcx.expect_variant_def(def); if variant.kind != VariantKind::Unit { report_unexpected_def(); - return; + return tcx.types.err; } } Def::Struct(ctor_did) => { @@ -547,7 +546,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let variant = tcx.lookup_adt_def(did).struct_variant(); if variant.kind != VariantKind::Unit { report_unexpected_def(); - return; + return tcx.types.err; } } Def::Const(..) | Def::AssociatedConst(..) => {} // OK @@ -555,11 +554,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } // Type check the path. - let scheme = tcx.lookup_item_type(def.def_id()); - let predicates = tcx.lookup_predicates(def.def_id()); - let pat_ty = self.instantiate_value_path(segments, scheme, &predicates, - opt_ty, def, pat.span, pat.id); + let pat_ty = self.instantiate_value_path(segments, opt_ty, def, pat.span, pat.id); self.demand_suptype(pat.span, expected, pat_ty); + pat_ty } fn check_pat_tuple_struct(&self, @@ -567,11 +564,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { path: &hir::Path, subpats: &'gcx [P], ddpos: Option, - expected: Ty<'tcx>) + expected: Ty<'tcx>) -> Ty<'tcx> { let tcx = self.tcx; let on_error = || { - self.write_error(pat.id); for pat in subpats { self.check_pat(&pat, tcx.types.err); } @@ -583,7 +579,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { tcx.sess.add_lint(lint::builtin::MATCH_OF_UNIT_VARIANT_VIA_PAREN_DOTDOT, pat.id, pat.span, msg); } else { - span_err!(tcx.sess, pat.span, E0164, "{}", msg); + struct_span_err!(tcx.sess, pat.span, E0164, "{}", msg) + .span_label(pat.span, &format!("not a tuple variant or struct")).emit(); on_error(); } }; @@ -594,11 +591,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { Def::Err => { self.set_tainted_by_errors(); on_error(); - return; + return tcx.types.err; } Def::Const(..) | Def::AssociatedConst(..) | Def::Method(..) => { report_unexpected_def(false); - return; + return tcx.types.err; } Def::Variant(..) => { tcx.expect_variant_def(def) @@ -615,29 +612,25 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { report_unexpected_def(true); } else if variant.kind != VariantKind::Tuple { report_unexpected_def(false); - return; + return tcx.types.err; } // Type check the path. - let scheme = tcx.lookup_item_type(def.def_id()); - let scheme = if scheme.ty.is_fn() { + let pat_ty = self.instantiate_value_path(segments, opt_ty, def, pat.span, pat.id); + let pat_ty = if pat_ty.is_fn() { // Replace constructor type with constructed type for tuple struct patterns. - let fn_ret = tcx.no_late_bound_regions(&scheme.ty.fn_ret()).unwrap(); - ty::TypeScheme { ty: fn_ret, generics: scheme.generics } + tcx.no_late_bound_regions(&pat_ty.fn_ret()).unwrap() } else { // Leave the type as is for unit structs (backward compatibility). - scheme + pat_ty }; - let predicates = tcx.lookup_predicates(def.def_id()); - let pat_ty = self.instantiate_value_path(segments, scheme, &predicates, - opt_ty, def, pat.span, pat.id); self.demand_eqtype(pat.span, expected, pat_ty); // Type check subpatterns. 
if subpats.len() == variant.fields.len() || subpats.len() < variant.fields.len() && ddpos.is_some() { let substs = match pat_ty.sty { - ty::TyStruct(_, substs) | ty::TyEnum(_, substs) => substs, + ty::TyAdt(_, substs) => substs, ref ty => bug!("unexpected pattern type {:?}", ty), }; for (i, subpat) in subpats.iter().enumerate_and_adjust(variant.fields.len(), ddpos) { @@ -663,22 +656,24 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { variant.fields.len(), fields_ending, subpats.len())) .emit(); on_error(); + return tcx.types.err; } + pat_ty } - /// `path` is the AST path item naming the type of this struct. - /// `fields` is the field patterns of the struct pattern. - /// `struct_fields` describes the type of each field of the struct. - /// `struct_id` is the ID of the struct. - /// `etc` is true if the pattern said '...' and false otherwise. - pub fn check_struct_pat_fields(&self, - span: Span, - fields: &'gcx [Spanned], - variant: ty::VariantDef<'tcx>, - substs: &Substs<'tcx>, - etc: bool) { + fn check_struct_pat_fields(&self, + adt_ty: Ty<'tcx>, + span: Span, + variant: ty::VariantDef<'tcx>, + fields: &'gcx [Spanned], + etc: bool) { let tcx = self.tcx; + let (substs, kind_name) = match adt_ty.sty { + ty::TyAdt(adt, substs) => (substs, adt.variant_descr()), + _ => span_bug!(span, "struct pattern is not an ADT") + }; + // Index the struct fields' types. let field_map = variant.fields .iter() @@ -708,11 +703,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { .map(|f| self.field_ty(span, f, substs)) .unwrap_or_else(|| { struct_span_err!(tcx.sess, span, E0026, - "struct `{}` does not have a field named `{}`", + "{} `{}` does not have a field named `{}`", + kind_name, tcx.item_path_str(variant.did), field.name) .span_label(span, - &format!("struct `{}` does not have field `{}`", + &format!("{} `{}` does not have field `{}`", + kind_name, tcx.item_path_str(variant.did), field.name)) .emit(); @@ -725,8 +722,15 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.check_pat(&field.pat, field_ty); } - // Report an error if not all the fields were specified. - if !etc { + // Report an error if incorrect number of the fields were specified. 
+ if kind_name == "union" { + if fields.len() != 1 { + tcx.sess.span_err(span, "union patterns should have exactly one field"); + } + if etc { + tcx.sess.span_err(span, "`..` cannot be used in union patterns"); + } + } else if !etc { for field in variant.fields .iter() .filter(|field| !used_fields.contains_key(&field.name)) { diff --git a/src/librustc_typeck/check/autoderef.rs b/src/librustc_typeck/check/autoderef.rs index 265422468f..19261a2447 100644 --- a/src/librustc_typeck/check/autoderef.rs +++ b/src/librustc_typeck/check/autoderef.rs @@ -101,7 +101,7 @@ impl<'a, 'gcx, 'tcx> Autoderef<'a, 'gcx, 'tcx> { Some(f) => f, None => return None }, - substs: tcx.mk_substs(Substs::new_trait(vec![], vec![], self.cur_ty)) + substs: Substs::new_trait(tcx, self.cur_ty, &[]) }; let cause = traits::ObligationCause::misc(self.span, self.fcx.body_id); diff --git a/src/librustc_typeck/check/callee.rs b/src/librustc_typeck/check/callee.rs index 985c3be149..d1fb0736d2 100644 --- a/src/librustc_typeck/check/callee.rs +++ b/src/librustc_typeck/check/callee.rs @@ -12,10 +12,9 @@ use super::{DeferredCallResolution, Expectation, FnCtxt, TupleArgumentsFlag}; use CrateCtxt; -use middle::cstore::LOCAL_CRATE; use hir::def::Def; -use hir::def_id::DefId; -use rustc::infer; +use hir::def_id::{DefId, LOCAL_CRATE}; +use rustc::{infer, traits}; use rustc::ty::{self, LvaluePreference, Ty}; use syntax::parse::token; use syntax::ptr::P; @@ -29,7 +28,7 @@ use rustc::hir; pub fn check_legal_trait_for_method_call(ccx: &CrateCtxt, span: Span, trait_id: DefId) { if ccx.tcx.lang_items.drop_trait() == Some(trait_id) { struct_span_err!(ccx.tcx.sess, span, E0040, "explicit use of destructor method") - .span_label(span, &format!("call to destructor method")) + .span_label(span, &format!("explicit destructor calls not allowed")) .emit(); } } @@ -45,10 +44,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { call_expr: &'gcx hir::Expr, callee_expr: &'gcx hir::Expr, arg_exprs: &'gcx [P], - expected: Expectation<'tcx>) + expected: Expectation<'tcx>) -> Ty<'tcx> { - self.check_expr(callee_expr); - let original_callee_ty = self.expr_ty(callee_expr); + let original_callee_ty = self.check_expr(callee_expr); let mut autoderef = self.autoderef(callee_expr.span, original_callee_ty); let result = autoderef.by_ref().flat_map(|(adj_ty, idx)| { @@ -57,25 +55,30 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let callee_ty = autoderef.unambiguous_final_ty(); autoderef.finalize(LvaluePreference::NoPreference, Some(callee_expr)); - match result { + let output = match result { None => { // this will report an error since original_callee_ty is not a fn - self.confirm_builtin_call(call_expr, original_callee_ty, arg_exprs, expected); + self.confirm_builtin_call(call_expr, original_callee_ty, arg_exprs, expected) } Some(CallStep::Builtin) => { - self.confirm_builtin_call(call_expr, callee_ty, arg_exprs, expected); + self.confirm_builtin_call(call_expr, callee_ty, arg_exprs, expected) } Some(CallStep::DeferredClosure(fn_sig)) => { - self.confirm_deferred_closure_call(call_expr, arg_exprs, expected, fn_sig); + self.confirm_deferred_closure_call(call_expr, arg_exprs, expected, fn_sig) } Some(CallStep::Overloaded(method_callee)) => { self.confirm_overloaded_call(call_expr, callee_expr, - arg_exprs, expected, method_callee); + arg_exprs, expected, method_callee) } - } + }; + + // we must check that return type of called functions is WF: + self.register_wf_obligation(output, call_expr.span, traits::MiscObligation); + + output } fn try_overloaded_call_step(&self, 
@@ -181,12 +184,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { call_expr: &hir::Expr, callee_ty: Ty<'tcx>, arg_exprs: &'gcx [P], - expected: Expectation<'tcx>) + expected: Expectation<'tcx>) -> Ty<'tcx> { let error_fn_sig; let fn_sig = match callee_ty.sty { - ty::TyFnDef(_, _, &ty::BareFnTy {ref sig, ..}) | + ty::TyFnDef(.., &ty::BareFnTy {ref sig, ..}) | ty::TyFnPtr(&ty::BareFnTy {ref sig, ..}) => { sig } @@ -245,14 +248,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn_sig.variadic, TupleArgumentsFlag::DontTupleArguments); - self.write_call(call_expr, fn_sig.output); + fn_sig.output } fn confirm_deferred_closure_call(&self, call_expr: &hir::Expr, arg_exprs: &'gcx [P], expected: Expectation<'tcx>, - fn_sig: ty::FnSig<'tcx>) + fn_sig: ty::FnSig<'tcx>) -> Ty<'tcx> { // `fn_sig` is the *signature* of the cosure being called. We // don't know the full details yet (`Fn` vs `FnMut` etc), but we @@ -272,7 +275,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn_sig.variadic, TupleArgumentsFlag::TupleArguments); - self.write_call(call_expr, fn_sig.output); + fn_sig.output } fn confirm_overloaded_call(&self, @@ -280,7 +283,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { callee_expr: &'gcx hir::Expr, arg_exprs: &'gcx [P], expected: Expectation<'tcx>, - method_callee: ty::MethodCallee<'tcx>) + method_callee: ty::MethodCallee<'tcx>) -> Ty<'tcx> { let output_type = self.check_method_argument_types(call_expr.span, @@ -289,9 +292,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { arg_exprs, TupleArgumentsFlag::TupleArguments, expected); - self.write_call(call_expr, output_type); self.write_overloaded_call_method_map(call_expr, method_callee); + output_type } fn write_overloaded_call_method_map(&self, diff --git a/src/librustc_typeck/check/cast.rs b/src/librustc_typeck/check/cast.rs index 7a4cc09a7d..51a9b18392 100644 --- a/src/librustc_typeck/check/cast.rs +++ b/src/librustc_typeck/check/cast.rs @@ -78,8 +78,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn unsize_kind(&self, t: Ty<'tcx>) -> Option> { match t.sty { ty::TySlice(_) | ty::TyStr => Some(UnsizeKind::Length), - ty::TyTrait(ref tty) => Some(UnsizeKind::Vtable(tty.principal_def_id())), - ty::TyStruct(def, substs) => { + ty::TyTrait(ref tty) => Some(UnsizeKind::Vtable(tty.principal.def_id())), + ty::TyAdt(def, substs) if def.is_struct() => { // FIXME(arielb1): do some kind of normalization match def.struct_variant().fields.last() { None => None, @@ -161,6 +161,7 @@ impl<'a, 'gcx, 'tcx> CastCheck<'tcx> { } CastError::CastToBool => { struct_span_err!(fcx.tcx.sess, self.span, E0054, "cannot cast as `bool`") + .span_label(self.span, &format!("unsupported cast")) .help("compare with zero instead") .emit(); } @@ -318,9 +319,9 @@ impl<'a, 'gcx, 'tcx> CastCheck<'tcx> { (Some(t_from), Some(t_cast)) => (t_from, t_cast), // Function item types may need to be reified before casts. (None, Some(t_cast)) => { - if let ty::TyFnDef(_, _, f) = self.expr_ty.sty { + if let ty::TyFnDef(.., f) = self.expr_ty.sty { // Attempt a coercion to a fn pointer type. 
- let res = fcx.try_coerce(self.expr, fcx.tcx.mk_fn_ptr(f)); + let res = fcx.try_coerce(self.expr, self.expr_ty, fcx.tcx.mk_fn_ptr(f)); if !res.is_ok() { return Err(CastError::NonScalar); } @@ -470,7 +471,7 @@ impl<'a, 'gcx, 'tcx> CastCheck<'tcx> { } fn try_coercion_cast(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> bool { - fcx.try_coerce(self.expr, self.cast_ty).is_ok() + fcx.try_coerce(self.expr, self.expr_ty, self.cast_ty).is_ok() } } diff --git a/src/librustc_typeck/check/closure.rs b/src/librustc_typeck/check/closure.rs index 3acb8017ee..9e41d1b567 100644 --- a/src/librustc_typeck/check/closure.rs +++ b/src/librustc_typeck/check/closure.rs @@ -13,7 +13,6 @@ use super::{check_fn, Expectation, FnCtxt}; use astconv::AstConv; -use rustc::ty::subst; use rustc::ty::{self, ToPolyTraitRef, Ty}; use std::cmp; use syntax::abi::Abi; @@ -25,7 +24,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { _capture: hir::CaptureClause, decl: &'gcx hir::FnDecl, body: &'gcx hir::Block, - expected: Expectation<'tcx>) { + expected: Expectation<'tcx>) -> Ty<'tcx> { debug!("check_expr_closure(expr={:?},expected={:?})", expr, expected); @@ -45,7 +44,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { opt_kind: Option, decl: &'gcx hir::FnDecl, body: &'gcx hir::Block, - expected_sig: Option>) { + expected_sig: Option>) -> Ty<'tcx> { let expr_def_id = self.tcx.map.local_def_id(expr.id); debug!("check_closure opt_kind={:?} expected_sig={:?}", @@ -71,10 +70,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.parameter_environment.free_substs, upvar_tys); - self.write_ty(expr.id, closure_type); - let fn_sig = self.tcx.liberate_late_bound_regions( self.tcx.region_maps.call_site_extent(expr.id, body.id), &fn_ty.sig); + let fn_sig = + (**self).normalize_associated_types_in(body.span, body.id, &fn_sig); check_fn(self, hir::Unsafety::Normal, expr.id, &fn_sig, decl, expr.id, &body); @@ -92,6 +91,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { Some(kind) => { self.tables.borrow_mut().closure_kinds.insert(expr_def_id, kind); } None => { } } + + closure_type } fn deduce_expectations_from_expected_type(&self, expected_ty: Ty<'tcx>) @@ -102,12 +103,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { match expected_ty.sty { ty::TyTrait(ref object_type) => { - let proj_bounds = object_type.projection_bounds_with_self_ty(self.tcx, - self.tcx.types.err); - let sig = proj_bounds.iter() - .filter_map(|pb| self.deduce_sig_from_projection(pb)) - .next(); - let kind = self.tcx.lang_items.fn_trait_kind(object_type.principal_def_id()); + let sig = object_type.projection_bounds.iter().filter_map(|pb| { + let pb = pb.with_self_ty(self.tcx, self.tcx.types.err); + self.deduce_sig_from_projection(&pb) + }).next(); + let kind = self.tcx.lang_items.fn_trait_kind(object_type.principal.def_id()); (sig, kind) } ty::TyInfer(ty::TyVar(vid)) => { @@ -167,7 +167,6 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ty::Predicate::TypeOutlives(..) => None, ty::Predicate::WellFormed(..) => None, ty::Predicate::ObjectSafe(..) => None, - ty::Predicate::Rfc1592(..) 
=> None, // NB: This predicate is created by breaking down a // `ClosureType: FnFoo()` predicate, where @@ -205,7 +204,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { return None; } - let arg_param_ty = *trait_ref.substs().types.get(subst::TypeSpace, 0); + let arg_param_ty = trait_ref.substs().type_at(1); let arg_param_ty = self.resolve_type_vars_if_possible(&arg_param_ty); debug!("deduce_sig_from_projection: arg_param_ty {:?}", arg_param_ty); diff --git a/src/librustc_typeck/check/coercion.rs b/src/librustc_typeck/check/coercion.rs index 4a0d529812..98a05989b1 100644 --- a/src/librustc_typeck/check/coercion.rs +++ b/src/librustc_typeck/check/coercion.rs @@ -195,7 +195,7 @@ impl<'f, 'gcx, 'tcx> Coerce<'f, 'gcx, 'tcx> { } match a.sty { - ty::TyFnDef(_, _, a_f) => { + ty::TyFnDef(.., a_f) => { // Function items are coercible to any closure // type; function pointers are not (that would // require double indirection). @@ -336,7 +336,7 @@ impl<'f, 'gcx, 'tcx> Coerce<'f, 'gcx, 'tcx> { if r_borrow_var.is_none() { // create var lazilly, at most once let coercion = Coercion(span); let r = self.next_region_var(coercion); - r_borrow_var = Some(self.tcx.mk_region(r)); // [4] above + r_borrow_var = Some(r); // [4] above } r_borrow_var.unwrap() }; @@ -436,8 +436,7 @@ impl<'f, 'gcx, 'tcx> Coerce<'f, 'gcx, 'tcx> { let coercion = Coercion(self.origin.span()); let r_borrow = self.next_region_var(coercion); - let region = self.tcx.mk_region(r_borrow); - (mt_a.ty, Some(AutoPtr(region, mt_b.mutbl))) + (mt_a.ty, Some(AutoPtr(r_borrow, mt_b.mutbl))) } (&ty::TyRef(_, mt_a), &ty::TyRawPtr(mt_b)) => { coerce_mutbls(mt_a.mutbl, mt_b.mutbl)?; @@ -459,7 +458,7 @@ impl<'f, 'gcx, 'tcx> Coerce<'f, 'gcx, 'tcx> { coerce_unsized_did, 0, source, - vec![target])); + &[target])); // Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid // emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where @@ -485,7 +484,7 @@ impl<'f, 'gcx, 'tcx> Coerce<'f, 'gcx, 'tcx> { // Object safety violations or miscellaneous. Err(err) => { - self.report_selection_error(&obligation, &err, None); + self.report_selection_error(&obligation, &err); // Treat this like an obligation and follow through // with the unsizing - the lack of a coercion should // be silent, as it causes a type mismatch later. @@ -631,9 +630,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// The expressions *must not* have any pre-existing adjustments. pub fn try_coerce(&self, expr: &hir::Expr, + expr_ty: Ty<'tcx>, target: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { - let source = self.resolve_type_vars_with_obligations(self.expr_ty(expr)); + let source = self.resolve_type_vars_with_obligations(expr_ty); debug!("coercion::try({:?}: {:?} -> {:?})", expr, source, target); let mut coerce = Coerce::new(self, TypeOrigin::ExprAssignable(expr.span)); @@ -659,14 +659,15 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { origin: TypeOrigin, exprs: E, prev_ty: Ty<'tcx>, - new: &'b hir::Expr) + new: &'b hir::Expr, + new_ty: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> // FIXME(eddyb) use copyable iterators when that becomes ergonomic. 
where E: Fn() -> I, I: IntoIterator { let prev_ty = self.resolve_type_vars_with_obligations(prev_ty); - let new_ty = self.resolve_type_vars_with_obligations(self.expr_ty(new)); + let new_ty = self.resolve_type_vars_with_obligations(new_ty); debug!("coercion::try_find_lub({:?}, {:?})", prev_ty, new_ty); let trace = TypeTrace::types(origin, true, prev_ty, new_ty); @@ -742,7 +743,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { autoderefs: 1, autoref: Some(AutoPtr(_, mutbl_adj)), unsize: None - })) => match self.expr_ty(expr).sty { + })) => match self.node_ty(expr.id).sty { ty::TyRef(_, mt_orig) => { // Reborrow that we can safely ignore. mutbl_adj == mt_orig.mutbl diff --git a/src/librustc_typeck/check/compare_method.rs b/src/librustc_typeck/check/compare_method.rs index e6ddc6ad69..ffff05885a 100644 --- a/src/librustc_typeck/check/compare_method.rs +++ b/src/librustc_typeck/check/compare_method.rs @@ -12,10 +12,9 @@ use middle::free_region::FreeRegionMap; use rustc::infer::{self, InferOk, TypeOrigin}; use rustc::ty; use rustc::traits::{self, Reveal}; -use rustc::ty::error::ExpectedFound; -use rustc::ty::subst::{self, Subst, Substs, VecPerParamSpace}; -use rustc::hir::map::Node; -use rustc::hir::{ImplItemKind, TraitItem_}; +use rustc::ty::error::{ExpectedFound, TypeError}; +use rustc::ty::subst::{Subst, Substs}; +use rustc::hir::{ImplItemKind, TraitItem_, Ty_}; use syntax::ast; use syntax_pos::Span; @@ -39,7 +38,8 @@ pub fn compare_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, impl_m_span: Span, impl_m_body_id: ast::NodeId, trait_m: &ty::Method<'tcx>, - impl_trait_ref: &ty::TraitRef<'tcx>) { + impl_trait_ref: &ty::TraitRef<'tcx>, + trait_item_span: Option) { debug!("compare_impl_method(impl_trait_ref={:?})", impl_trait_ref); @@ -95,10 +95,22 @@ pub fn compare_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, } } - let num_impl_m_type_params = impl_m.generics.types.len(subst::FnSpace); - let num_trait_m_type_params = trait_m.generics.types.len(subst::FnSpace); + let num_impl_m_type_params = impl_m.generics.types.len(); + let num_trait_m_type_params = trait_m.generics.types.len(); if num_impl_m_type_params != num_trait_m_type_params { - span_err!(tcx.sess, impl_m_span, E0049, + let impl_m_node_id = tcx.map.as_local_node_id(impl_m.def_id).unwrap(); + let span = match tcx.map.expect_impl_item(impl_m_node_id).node { + ImplItemKind::Method(ref impl_m_sig, _) => { + if impl_m_sig.generics.is_parameterized() { + impl_m_sig.generics.span + } else { + impl_m_span + } + } + _ => bug!("{:?} is not a method", impl_m) + }; + + let mut err = struct_span_err!(tcx.sess, span, E0049, "method `{}` has {} type parameter{} \ but its trait declaration has {} type parameter{}", trait_m.name, @@ -106,6 +118,32 @@ pub fn compare_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, if num_impl_m_type_params == 1 {""} else {"s"}, num_trait_m_type_params, if num_trait_m_type_params == 1 {""} else {"s"}); + + let mut suffix = None; + + if let Some(span) = trait_item_span { + err.span_label(span, + &format!("expected {}", + &if num_trait_m_type_params != 1 { + format!("{} type parameters", num_trait_m_type_params) + } else { + format!("{} type parameter", num_trait_m_type_params) + })); + } else { + suffix = Some(format!(", expected {}", num_trait_m_type_params)); + } + + err.span_label(span, + &format!("found {}{}", + &if num_impl_m_type_params != 1 { + format!("{} type parameters", num_impl_m_type_params) + } else { + format!("1 type parameter") + }, + suffix.as_ref().map(|s| &s[..]).unwrap_or(""))); + + err.emit(); + 
return; } @@ -194,10 +232,8 @@ pub fn compare_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, // Create mapping from trait to skolemized. let trait_to_skol_substs = - trait_to_impl_substs - .subst(tcx, impl_to_skol_substs).clone() - .with_method(impl_to_skol_substs.types.get_slice(subst::FnSpace).to_vec(), - impl_to_skol_substs.regions.get_slice(subst::FnSpace).to_vec()); + impl_to_skol_substs.rebase_onto(tcx, impl_m.container_id(), + trait_to_impl_substs.subst(tcx, impl_to_skol_substs)); debug!("compare_impl_method: trait_to_skol_substs={:?}", trait_to_skol_substs); @@ -208,7 +244,7 @@ pub fn compare_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, impl_m, &trait_m.generics, &impl_m.generics, - &trait_to_skol_substs, + trait_to_skol_substs, impl_to_skol_substs) { return; } @@ -216,58 +252,49 @@ pub fn compare_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, tcx.infer_ctxt(None, None, Reveal::NotSpecializable).enter(|mut infcx| { let mut fulfillment_cx = traits::FulfillmentContext::new(); - // Normalize the associated types in the trait_bounds. - let trait_bounds = trait_m.predicates.instantiate(tcx, &trait_to_skol_substs); - // Create obligations for each predicate declared by the impl // definition in the context of the trait's parameter // environment. We can't just use `impl_env.caller_bounds`, // however, because we want to replace all late-bound regions with // region variables. - let impl_bounds = - impl_m.predicates.instantiate(tcx, impl_to_skol_substs); - - debug!("compare_impl_method: impl_bounds={:?}", impl_bounds); + let impl_predicates = tcx.lookup_predicates(impl_m.predicates.parent.unwrap()); + let mut hybrid_preds = impl_predicates.instantiate(tcx, impl_to_skol_substs); - // Obtain the predicate split predicate sets for each. - let trait_pred = trait_bounds.predicates.split(); - let impl_pred = impl_bounds.predicates.split(); + debug!("compare_impl_method: impl_bounds={:?}", hybrid_preds); // This is the only tricky bit of the new way we check implementation methods - // We need to build a set of predicates where only the FnSpace bounds + // We need to build a set of predicates where only the method-level bounds // are from the trait and we assume all other bounds from the implementation // to be previously satisfied. // // We then register the obligations from the impl_m and check to see // if all constraints hold. - let hybrid_preds = VecPerParamSpace::new( - impl_pred.types, - impl_pred.selfs, - trait_pred.fns - ); + hybrid_preds.predicates.extend( + trait_m.predicates.instantiate_own(tcx, trait_to_skol_substs).predicates); // Construct trait parameter environment and then shift it into the skolemized viewpoint. // The key step here is to update the caller_bounds's predicates to be // the new hybrid bounds we computed. 
let normalize_cause = traits::ObligationCause::misc(impl_m_span, impl_m_body_id); - let trait_param_env = impl_param_env.with_caller_bounds(hybrid_preds.into_vec()); + let trait_param_env = impl_param_env.with_caller_bounds(hybrid_preds.predicates); let trait_param_env = traits::normalize_param_env_or_error(tcx, trait_param_env, normalize_cause.clone()); // FIXME(@jroesch) this seems ugly, but is a temporary change infcx.parameter_environment = trait_param_env; - debug!("compare_impl_method: trait_bounds={:?}", + debug!("compare_impl_method: caller_bounds={:?}", infcx.parameter_environment.caller_bounds); let mut selcx = traits::SelectionContext::new(&infcx); - let (impl_pred_fns, _) = + let impl_m_own_bounds = impl_m.predicates.instantiate_own(tcx, impl_to_skol_substs); + let (impl_m_own_bounds, _) = infcx.replace_late_bound_regions_with_fresh_var( impl_m_span, infer::HigherRankedType, - &ty::Binder(impl_pred.fns)); - for predicate in impl_pred_fns { + &ty::Binder(impl_m_own_bounds.predicates)); + for predicate in impl_m_own_bounds { let traits::Normalized { value: predicate, .. } = traits::normalize(&mut selcx, normalize_cause.clone(), &predicate); @@ -314,7 +341,7 @@ pub fn compare_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, let impl_fty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { unsafety: impl_m.fty.unsafety, abi: impl_m.fty.abi, - sig: ty::Binder(impl_sig) + sig: ty::Binder(impl_sig.clone()) })); debug!("compare_impl_method: impl_fty={:?}", impl_fty); @@ -322,7 +349,7 @@ pub fn compare_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, infcx.parameter_environment.free_id_outlive, &trait_m.fty.sig); let trait_sig = - trait_sig.subst(tcx, &trait_to_skol_substs); + trait_sig.subst(tcx, trait_to_skol_substs); let trait_sig = assoc::normalize_associated_types_in(&infcx, &mut fulfillment_cx, @@ -332,7 +359,7 @@ pub fn compare_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, let trait_fty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { unsafety: trait_m.fty.unsafety, abi: trait_m.fty.abi, - sig: ty::Binder(trait_sig) + sig: ty::Binder(trait_sig.clone()) })); debug!("compare_impl_method: trait_fty={:?}", trait_fty); @@ -342,16 +369,26 @@ pub fn compare_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, impl_fty, trait_fty); + let (impl_err_span, trait_err_span) = + extract_spans_for_error_reporting(&infcx, &terr, origin, impl_m, + impl_sig, trait_m, trait_sig); + + let origin = TypeOrigin::MethodCompatCheck(impl_err_span); + let mut diag = struct_span_err!( tcx.sess, origin.span(), E0053, "method `{}` has an incompatible type for trait", trait_m.name ); + infcx.note_type_err( - &mut diag, origin, + &mut diag, + origin, + trait_err_span.map(|sp| (sp, format!("type in trait"))), Some(infer::ValuePairs::Types(ExpectedFound { - expected: trait_fty, - found: impl_fty - })), &terr + expected: trait_fty, + found: impl_fty + })), + &terr ); diag.emit(); return @@ -390,8 +427,8 @@ pub fn compare_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, -> bool { - let trait_params = trait_generics.regions.get_slice(subst::FnSpace); - let impl_params = impl_generics.regions.get_slice(subst::FnSpace); + let trait_params = &trait_generics.regions[..]; + let impl_params = &impl_generics.regions[..]; debug!("check_region_bounds_on_impl_method: \ trait_generics={:?} \ @@ -413,15 +450,96 @@ pub fn compare_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, // are zero. Since I don't quite know how to phrase things at // the moment, give a kind of vague error message. 
if trait_params.len() != impl_params.len() { - span_err!(ccx.tcx.sess, span, E0195, + struct_span_err!(ccx.tcx.sess, span, E0195, "lifetime parameters or bounds on method `{}` do \ - not match the trait declaration", - impl_m.name); + not match the trait declaration",impl_m.name) + .span_label(span, &format!("lifetimes do not match trait")) + .emit(); return false; } return true; } + + fn extract_spans_for_error_reporting<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a, 'gcx, 'tcx>, + terr: &TypeError, + origin: TypeOrigin, + impl_m: &ty::Method, + impl_sig: ty::FnSig<'tcx>, + trait_m: &ty::Method, + trait_sig: ty::FnSig<'tcx>) + -> (Span, Option) { + let tcx = infcx.tcx; + let impl_m_node_id = tcx.map.as_local_node_id(impl_m.def_id).unwrap(); + let (impl_m_output, impl_m_iter) = match tcx.map.expect_impl_item(impl_m_node_id).node { + ImplItemKind::Method(ref impl_m_sig, _) => + (&impl_m_sig.decl.output, impl_m_sig.decl.inputs.iter()), + _ => bug!("{:?} is not a method", impl_m) + }; + + match *terr { + TypeError::Mutability => { + if let Some(trait_m_node_id) = tcx.map.as_local_node_id(trait_m.def_id) { + let trait_m_iter = match tcx.map.expect_trait_item(trait_m_node_id).node { + TraitItem_::MethodTraitItem(ref trait_m_sig, _) => + trait_m_sig.decl.inputs.iter(), + _ => bug!("{:?} is not a MethodTraitItem", trait_m) + }; + + impl_m_iter.zip(trait_m_iter).find(|&(ref impl_arg, ref trait_arg)| { + match (&impl_arg.ty.node, &trait_arg.ty.node) { + (&Ty_::TyRptr(_, ref impl_mt), &Ty_::TyRptr(_, ref trait_mt)) | + (&Ty_::TyPtr(ref impl_mt), &Ty_::TyPtr(ref trait_mt)) => + impl_mt.mutbl != trait_mt.mutbl, + _ => false + } + }).map(|(ref impl_arg, ref trait_arg)| { + match (impl_arg.to_self(), trait_arg.to_self()) { + (Some(impl_self), Some(trait_self)) => + (impl_self.span, Some(trait_self.span)), + (None, None) => (impl_arg.ty.span, Some(trait_arg.ty.span)), + _ => bug!("impl and trait fns have different first args, \ + impl: {:?}, trait: {:?}", impl_arg, trait_arg) + } + }).unwrap_or((origin.span(), tcx.map.span_if_local(trait_m.def_id))) + } else { + (origin.span(), tcx.map.span_if_local(trait_m.def_id)) + } + } + TypeError::Sorts(ExpectedFound { .. }) => { + if let Some(trait_m_node_id) = tcx.map.as_local_node_id(trait_m.def_id) { + let (trait_m_output, trait_m_iter) = + match tcx.map.expect_trait_item(trait_m_node_id).node { + TraitItem_::MethodTraitItem(ref trait_m_sig, _) => + (&trait_m_sig.decl.output, trait_m_sig.decl.inputs.iter()), + _ => bug!("{:?} is not a MethodTraitItem", trait_m) + }; + + let impl_iter = impl_sig.inputs.iter(); + let trait_iter = trait_sig.inputs.iter(); + impl_iter.zip(trait_iter).zip(impl_m_iter).zip(trait_m_iter) + .filter_map(|(((impl_arg_ty, trait_arg_ty), impl_arg), trait_arg)| { + match infcx.sub_types(true, origin, trait_arg_ty, impl_arg_ty) { + Ok(_) => None, + Err(_) => Some((impl_arg.ty.span, Some(trait_arg.ty.span))) + } + }) + .next() + .unwrap_or_else(|| { + if infcx.sub_types(false, origin, impl_sig.output, + trait_sig.output).is_err() { + (impl_m_output.span(), Some(trait_m_output.span())) + } else { + (origin.span(), tcx.map.span_if_local(trait_m.def_id)) + } + }) + } else { + (origin.span(), tcx.map.span_if_local(trait_m.def_id)) + } + } + _ => (origin.span(), tcx.map.span_if_local(trait_m.def_id)) + } + } } pub fn compare_const_impl<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, @@ -453,16 +571,14 @@ pub fn compare_const_impl<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, // Create mapping from trait to skolemized. 
let trait_to_skol_substs = - trait_to_impl_substs - .subst(tcx, impl_to_skol_substs).clone() - .with_method(impl_to_skol_substs.types.get_slice(subst::FnSpace).to_vec(), - impl_to_skol_substs.regions.get_slice(subst::FnSpace).to_vec()); + impl_to_skol_substs.rebase_onto(tcx, impl_c.container.id(), + trait_to_impl_substs.subst(tcx, impl_to_skol_substs)); debug!("compare_const_impl: trait_to_skol_substs={:?}", trait_to_skol_substs); // Compute skolemized form of impl and trait const tys. let impl_ty = impl_c.ty.subst(tcx, impl_to_skol_substs); - let trait_ty = trait_c.ty.subst(tcx, &trait_to_skol_substs); + let trait_ty = trait_c.ty.subst(tcx, trait_to_skol_substs); let mut origin = TypeOrigin::Misc(impl_c_span); let err = infcx.commit_if_ok(|_| { @@ -471,7 +587,7 @@ pub fn compare_const_impl<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, assoc::normalize_associated_types_in(&infcx, &mut fulfillment_cx, impl_c_span, - 0, + ast::CRATE_NODE_ID, &impl_ty); debug!("compare_const_impl: impl_ty={:?}", @@ -481,7 +597,7 @@ pub fn compare_const_impl<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, assoc::normalize_associated_types_in(&infcx, &mut fulfillment_cx, impl_c_span, - 0, + ast::CRATE_NODE_ID, &trait_ty); debug!("compare_const_impl: trait_ty={:?}", @@ -500,12 +616,9 @@ pub fn compare_const_impl<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, trait_ty); // Locate the Span containing just the type of the offending impl - if let Some(impl_trait_node) = tcx.map.get_if_local(impl_c.def_id) { - if let Node::NodeImplItem(impl_trait_item) = impl_trait_node { - if let ImplItemKind::Const(ref ty, _) = impl_trait_item.node { - origin = TypeOrigin::Misc(ty.span); - } - } + match tcx.map.expect_impl_item(impl_c_node_id).node { + ImplItemKind::Const(ref ty, _) => origin = TypeOrigin::Misc(ty.span), + _ => bug!("{:?} is not a impl const", impl_c) } let mut diag = struct_span_err!( @@ -515,16 +628,16 @@ pub fn compare_const_impl<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, ); // Add a label to the Span containing just the type of the item - if let Some(orig_trait_node) = tcx.map.get_if_local(trait_c.def_id) { - if let Node::NodeTraitItem(orig_trait_item) = orig_trait_node { - if let TraitItem_::ConstTraitItem(ref ty, _) = orig_trait_item.node { - diag.span_label(ty.span, &format!("original trait requirement")); - } - } - } + let trait_c_node_id = tcx.map.as_local_node_id(trait_c.def_id).unwrap(); + let trait_c_span = match tcx.map.expect_trait_item(trait_c_node_id).node { + TraitItem_::ConstTraitItem(ref ty, _) => ty.span, + _ => bug!("{:?} is not a trait const", trait_c) + }; infcx.note_type_err( - &mut diag, origin, + &mut diag, + origin, + Some((trait_c_span, format!("type in trait"))), Some(infer::ValuePairs::Types(ExpectedFound { expected: trait_ty, found: impl_ty diff --git a/src/librustc_typeck/check/demand.rs b/src/librustc_typeck/check/demand.rs index 1f3a83ebc1..d622bc7f75 100644 --- a/src/librustc_typeck/check/demand.rs +++ b/src/librustc_typeck/check/demand.rs @@ -53,11 +53,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } // Checks that the type of `expr` can be coerced to `expected`. 
- pub fn demand_coerce(&self, expr: &hir::Expr, expected: Ty<'tcx>) { + pub fn demand_coerce(&self, expr: &hir::Expr, checked_ty: Ty<'tcx>, expected: Ty<'tcx>) { let expected = self.resolve_type_vars_with_obligations(expected); - if let Err(e) = self.try_coerce(expr, expected) { + if let Err(e) = self.try_coerce(expr, checked_ty, expected) { let origin = TypeOrigin::Misc(expr.span); - let expr_ty = self.resolve_type_vars_with_obligations(self.expr_ty(expr)); + let expr_ty = self.resolve_type_vars_with_obligations(checked_ty); self.report_mismatched_types(origin, expected, expr_ty, e); } } diff --git a/src/librustc_typeck/check/dropck.rs b/src/librustc_typeck/check/dropck.rs index f3a01ef740..cc958fb3b2 100644 --- a/src/librustc_typeck/check/dropck.rs +++ b/src/librustc_typeck/check/dropck.rs @@ -15,8 +15,8 @@ use hir::def_id::DefId; use middle::free_region::FreeRegionMap; use rustc::infer; use middle::region; -use rustc::ty::subst::{self, Subst}; -use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::subst::{Subst, Substs}; +use rustc::ty::{self, AdtKind, Ty, TyCtxt}; use rustc::traits::{self, Reveal}; use util::nodemap::FnvHashSet; @@ -41,16 +41,13 @@ use syntax_pos::{self, Span}; /// cannot do `struct S; impl Drop for S { ... }`). /// pub fn check_drop_impl(ccx: &CrateCtxt, drop_impl_did: DefId) -> Result<(), ()> { - let ty::TypeScheme { generics: ref dtor_generics, - ty: dtor_self_type } = ccx.tcx.lookup_item_type(drop_impl_did); + let dtor_self_type = ccx.tcx.lookup_item_type(drop_impl_did).ty; let dtor_predicates = ccx.tcx.lookup_predicates(drop_impl_did); match dtor_self_type.sty { - ty::TyEnum(adt_def, self_to_impl_substs) | - ty::TyStruct(adt_def, self_to_impl_substs) => { + ty::TyAdt(adt_def, self_to_impl_substs) => { ensure_drop_params_and_item_params_correspond(ccx, drop_impl_did, - dtor_generics, - &dtor_self_type, + dtor_self_type, adt_def.did)?; ensure_drop_predicates_are_implied_by_item_defn(ccx, @@ -73,8 +70,7 @@ pub fn check_drop_impl(ccx: &CrateCtxt, drop_impl_did: DefId) -> Result<(), ()> fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>( ccx: &CrateCtxt<'a, 'tcx>, drop_impl_did: DefId, - drop_impl_generics: &ty::Generics<'tcx>, - drop_impl_ty: &ty::Ty<'tcx>, + drop_impl_ty: Ty<'tcx>, self_type_did: DefId) -> Result<(), ()> { let tcx = ccx.tcx; @@ -93,8 +89,8 @@ fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>( let drop_impl_span = tcx.map.def_id_span(drop_impl_did, syntax_pos::DUMMY_SP); let fresh_impl_substs = - infcx.fresh_substs_for_generics(drop_impl_span, drop_impl_generics); - let fresh_impl_self_ty = drop_impl_ty.subst(tcx, &fresh_impl_substs); + infcx.fresh_substs_for_item(drop_impl_span, drop_impl_did); + let fresh_impl_self_ty = drop_impl_ty.subst(tcx, fresh_impl_substs); if let Err(_) = infcx.eq_types(true, infer::TypeOrigin::Misc(drop_impl_span), named_type, fresh_impl_self_ty) { @@ -114,10 +110,6 @@ fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>( return Err(()); } - if let Err(ref errors) = fulfillment_cx.select_rfc1592_obligations(&infcx) { - infcx.report_fulfillment_errors_as_warnings(errors, drop_impl_node_id); - } - let free_regions = FreeRegionMap::new(); infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id); Ok(()) @@ -131,7 +123,7 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>( drop_impl_did: DefId, dtor_predicates: &ty::GenericPredicates<'tcx>, self_type_did: DefId, - self_to_impl_substs: &subst::Substs<'tcx>) -> Result<(), ()> { + self_to_impl_substs: &Substs<'tcx>) -> Result<(), ()> 
{ // Here is an example, analogous to that from // `compare_impl_method`. @@ -179,10 +171,7 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>( let generic_assumptions = tcx.lookup_predicates(self_type_did); let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs); - assert!(assumptions_in_impl_context.predicates.is_empty_in(subst::SelfSpace)); - assert!(assumptions_in_impl_context.predicates.is_empty_in(subst::FnSpace)); - let assumptions_in_impl_context = - assumptions_in_impl_context.predicates.get_slice(subst::TypeSpace); + let assumptions_in_impl_context = assumptions_in_impl_context.predicates; // An earlier version of this code attempted to do this checking // via the traits::fulfill machinery. However, it ran into trouble @@ -190,10 +179,8 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>( // 'a:'b and T:'b into region inference constraints. It is simpler // just to look for all the predicates directly. - assert!(dtor_predicates.predicates.is_empty_in(subst::SelfSpace)); - assert!(dtor_predicates.predicates.is_empty_in(subst::FnSpace)); - let predicates = dtor_predicates.predicates.get_slice(subst::TypeSpace); - for predicate in predicates { + assert_eq!(dtor_predicates.parent, None); + for predicate in &dtor_predicates.predicates { // (We do not need to worry about deep analysis of type // expressions etc because the Drop impls are already forced // to take on a structure that is roughly an alpha-renaming of @@ -312,11 +299,13 @@ pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>( TypeContext::ADT { def_id, variant, field } => { let adt = tcx.lookup_adt_def(def_id); let variant_name = match adt.adt_kind() { - ty::AdtKind::Enum => format!("enum {} variant {}", - tcx.item_path_str(def_id), - variant), - ty::AdtKind::Struct => format!("struct {}", - tcx.item_path_str(def_id)) + AdtKind::Enum => format!("enum {} variant {}", + tcx.item_path_str(def_id), + variant), + AdtKind::Struct => format!("struct {}", + tcx.item_path_str(def_id)), + AdtKind::Union => format!("union {}", + tcx.item_path_str(def_id)), }; span_note!( &mut err, @@ -420,7 +409,7 @@ fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'gcx, 'tcx>( ty); cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span), - ty, ty::ReScope(cx.parent_scope)); + ty, tcx.mk_region(ty::ReScope(cx.parent_scope))); return Ok(()); } @@ -444,14 +433,14 @@ fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'gcx, 'tcx>( cx, context, ity, depth+1) } - ty::TyStruct(def, substs) if def.is_phantom_data() => { + ty::TyAdt(def, substs) if def.is_phantom_data() => { // PhantomData - behaves identically to T - let ity = *substs.types.get(subst::TypeSpace, 0); + let ity = substs.type_at(0); iterate_over_potentially_unsafe_regions_in_type( cx, context, ity, depth+1) } - ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => { + ty::TyAdt(def, substs) => { let did = def.did; for variant in &def.variants { for field in variant.fields.iter() { @@ -506,7 +495,7 @@ fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'gcx, 'tcx>( fn has_dtor_of_interest<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty: Ty<'tcx>) -> bool { match ty.sty { - ty::TyEnum(def, _) | ty::TyStruct(def, _) => { + ty::TyAdt(def, _) => { def.is_dtorck(tcx) } ty::TyTrait(..) | ty::TyProjection(..) | ty::TyAnon(..) 
=> { diff --git a/src/librustc_typeck/check/intrinsic.rs b/src/librustc_typeck/check/intrinsic.rs index 4334f04377..93d8b3e156 100644 --- a/src/librustc_typeck/check/intrinsic.rs +++ b/src/librustc_typeck/check/intrinsic.rs @@ -13,12 +13,12 @@ use intrinsics; use rustc::infer::TypeOrigin; -use rustc::ty::subst::{self, Substs}; +use rustc::ty::subst::Substs; use rustc::ty::FnSig; use rustc::ty::{self, Ty}; +use rustc::util::nodemap::FnvHashMap; use {CrateCtxt, require_same_types}; -use std::collections::{HashMap}; use syntax::abi::Abi; use syntax::ast; use syntax::parse::token; @@ -36,11 +36,11 @@ fn equate_intrinsic_type<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, let def_id = tcx.map.local_def_id(it.id); let i_ty = tcx.lookup_item_type(def_id); - let mut substs = Substs::empty(); - substs.types = i_ty.generics.types.map(|def| tcx.mk_param_from_def(def)); + let substs = Substs::for_item(tcx, def_id, + |_, _| tcx.mk_region(ty::ReErased), + |def, _| tcx.mk_param_from_def(def)); - let fty = tcx.mk_fn_def(def_id, tcx.mk_substs(substs), - tcx.mk_bare_fn(ty::BareFnTy { + let fty = tcx.mk_fn_def(def_id, substs, tcx.mk_bare_fn(ty::BareFnTy { unsafety: hir::Unsafety::Unsafe, abi: abi, sig: ty::Binder(FnSig { @@ -49,14 +49,19 @@ fn equate_intrinsic_type<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, variadic: false, }), })); - let i_n_tps = i_ty.generics.types.len(subst::FnSpace); + let i_n_tps = i_ty.generics.types.len(); if i_n_tps != n_tps { - struct_span_err!(tcx.sess, it.span, E0094, - "intrinsic has wrong number of type \ - parameters: found {}, expected {}", - i_n_tps, n_tps) - .span_label(it.span, &format!("expected {} type parameter", n_tps)) - .emit(); + let span = match it.node { + hir::ForeignItemFn(_, ref generics) => generics.span, + hir::ForeignItemStatic(..) => it.span + }; + + struct_span_err!(tcx.sess, span, E0094, + "intrinsic has wrong number of type \ + parameters: found {}, expected {}", + i_n_tps, n_tps) + .span_label(span, &format!("expected {} type parameter", n_tps)) + .emit(); } else { require_same_types(ccx, TypeOrigin::IntrinsicType(it.span), @@ -70,7 +75,7 @@ fn equate_intrinsic_type<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) { fn param<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, n: u32) -> Ty<'tcx> { let name = token::intern(&format!("P{}", n)); - ccx.tcx.mk_param(subst::FnSpace, n, name) + ccx.tcx.mk_param(n, name) } let tcx = ccx.tcx; @@ -122,7 +127,7 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) { ], ccx.tcx.types.usize) } "rustc_peek" => (1, vec![param(ccx, 0)], param(ccx, 0)), - "init" | "init_dropped" => (1, Vec::new(), param(ccx, 0)), + "init" => (1, Vec::new(), param(ccx, 0)), "uninit" => (1, Vec::new(), param(ccx, 0)), "forget" => (1, vec!( param(ccx, 0) ), tcx.mk_nil()), "transmute" => (2, vec!( param(ccx, 0) ), param(ccx, 1)), @@ -280,6 +285,8 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) { (1, vec![param(ccx, 0), param(ccx, 0)], param(ccx, 0)), "assume" => (0, vec![tcx.types.bool], tcx.mk_nil()), + "likely" => (0, vec![tcx.types.bool], tcx.types.bool), + "unlikely" => (0, vec![tcx.types.bool], tcx.types.bool), "discriminant_value" => (1, vec![ tcx.mk_imm_ref(tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1), @@ -301,8 +308,11 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) { } ref other => { - span_err!(tcx.sess, it.span, E0093, - "unrecognized intrinsic function: `{}`", *other); + struct_span_err!(tcx.sess, it.span, E0093, + "unrecognized 
intrinsic function: `{}`", + *other) + .span_label(it.span, &format!("unrecognized intrinsic")) + .emit(); return; } }; @@ -316,12 +326,12 @@ pub fn check_platform_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) { let param = |n| { let name = token::intern(&format!("P{}", n)); - ccx.tcx.mk_param(subst::FnSpace, n, name) + ccx.tcx.mk_param(n, name) }; let tcx = ccx.tcx; let i_ty = tcx.lookup_item_type(tcx.map.local_def_id(it.id)); - let i_n_tps = i_ty.generics.types.len(subst::FnSpace); + let i_n_tps = i_ty.generics.types.len(); let name = it.name.as_str(); let (n_tps, inputs, output) = match &*name { @@ -362,14 +372,14 @@ pub fn check_platform_intrinsic_type(ccx: &CrateCtxt, return } - let mut structural_to_nomimal = HashMap::new(); + let mut structural_to_nomimal = FnvHashMap(); let sig = tcx.no_late_bound_regions(i_ty.ty.fn_sig()).unwrap(); if intr.inputs.len() != sig.inputs.len() { span_err!(tcx.sess, it.span, E0444, "platform-specific intrinsic has invalid number of \ arguments: found {}, expected {}", - intr.inputs.len(), sig.inputs.len()); + sig.inputs.len(), intr.inputs.len()); return } let input_pairs = intr.inputs.iter().zip(&sig.inputs); @@ -402,7 +412,7 @@ fn match_intrinsic_type_to_type<'tcx, 'a>( ccx: &CrateCtxt<'a, 'tcx>, position: &str, span: Span, - structural_to_nominal: &mut HashMap<&'a intrinsics::Type, ty::Ty<'tcx>>, + structural_to_nominal: &mut FnvHashMap<&'a intrinsics::Type, ty::Ty<'tcx>>, expected: &'a intrinsics::Type, t: ty::Ty<'tcx>) { use intrinsics::Type::*; diff --git a/src/librustc_typeck/check/method/confirm.rs b/src/librustc_typeck/check/method/confirm.rs index 5fac65bbfd..ab59fafb65 100644 --- a/src/librustc_typeck/check/method/confirm.rs +++ b/src/librustc_typeck/check/method/confirm.rs @@ -12,7 +12,7 @@ use super::probe; use check::{FnCtxt, callee}; use hir::def_id::DefId; -use rustc::ty::subst::{self}; +use rustc::ty::subst::Substs; use rustc::traits; use rustc::ty::{self, LvaluePreference, NoPreference, PreferMutLvalue, Ty}; use rustc::ty::adjustment::{AdjustDerefRef, AutoDerefRef, AutoPtr}; @@ -42,10 +42,6 @@ struct InstantiatedMethodSig<'tcx> { /// argument is the receiver. method_sig: ty::FnSig<'tcx>, - /// Substitutions for all types/early-bound-regions declared on - /// the method. - all_substs: subst::Substs<'tcx>, - /// Generic bounds on the method's parameters which must be added /// as pending obligations. method_predicates: ty::InstantiatedPredicates<'tcx>, @@ -105,9 +101,8 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { // Create the final signature for the method, replacing late-bound regions. let InstantiatedMethodSig { - method_sig, all_substs, method_predicates + method_sig, method_predicates } = self.instantiate_method_sig(&pick, all_substs); - let all_substs = self.tcx.mk_substs(all_substs); let method_self_ty = method_sig.inputs[0]; // Unify the (adjusted) self type with what the method expects. 
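(Editorial aside on the hunks above and below: the intrinsic.rs and confirm.rs changes are part of a wider theme in this patch, where the old per-space substitution lists (`subst::SelfSpace` / `subst::TypeSpace` / `subst::FnSpace`) are dropped in favour of one flat, index-addressed list built with `Substs::for_item`, which takes one closure for regions and one for types and fills each slot from its parameter's `def.index`. The short standalone Rust sketch below mirrors only that construction pattern; `ParamDef`, `for_item`, `Subst`, and `"Foo"` are hypothetical illustration names, not rustc's internal types.)

    // Illustrative analogy, not compiler code: build a flat substitution
    // vector by asking a closure for each parameter, indexed by `index`,
    // the way `Substs::for_item` fills type/region slots in the diff above.
    #[derive(Debug)]
    enum Subst {
        SelfTy(&'static str),
        Fresh(usize), // placeholder for a fresh inference variable
    }

    struct ParamDef {
        index: usize,
    }

    fn for_item<F>(params: &[ParamDef], mut mk: F) -> Vec<Subst>
    where
        F: FnMut(&ParamDef, &[Subst]) -> Subst,
    {
        let mut out = Vec::with_capacity(params.len());
        for def in params {
            // Each new entry may inspect the substitutions built so far,
            // mirroring the `|def, substs|` closure shape in the patch.
            let s = mk(def, &out);
            out.push(s);
        }
        out
    }

    fn main() {
        let params: Vec<ParamDef> = (0..3usize).map(|index| ParamDef { index }).collect();
        let substs = for_item(&params, |def, _built| {
            if def.index == 0 {
                Subst::SelfTy("Foo") // slot 0 plays the role of `Self`
            } else {
                Subst::Fresh(def.index) // later slots get fresh variables
            }
        });
        println!("{:?}", substs);
    }
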
@@ -150,7 +145,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { { let (autoref, unsize) = if let Some(mutbl) = pick.autoref { let region = self.next_region_var(infer::Autoref(self.span)); - let autoref = AutoPtr(self.tcx.mk_region(region), mutbl); + let autoref = AutoPtr(region, mutbl); (Some(autoref), pick.unsize.map(|target| { target.adjust_for_autoref(self.tcx, Some(autoref)) })) @@ -198,7 +193,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { fn fresh_receiver_substs(&mut self, self_ty: Ty<'tcx>, pick: &probe::Pick<'tcx>) - -> subst::Substs<'tcx> + -> &'tcx Substs<'tcx> { match pick.kind { probe::InherentImplPick => { @@ -210,7 +205,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { probe::ObjectPick => { let trait_def_id = pick.item.container().id(); - self.extract_trait_ref(self_ty, |this, object_ty, data| { + self.extract_existential_trait_ref(self_ty, |this, object_ty, principal| { // The object data has no entry for the Self // Type. For the purposes of this method call, we // substitute the object type itself. This @@ -222,16 +217,16 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { // been ruled out when we deemed the trait to be // "object safe". let original_poly_trait_ref = - data.principal_trait_ref_with_self_ty(this.tcx, object_ty); + principal.with_self_ty(this.tcx, object_ty); let upcast_poly_trait_ref = - this.upcast(original_poly_trait_ref.clone(), trait_def_id); + this.upcast(original_poly_trait_ref, trait_def_id); let upcast_trait_ref = this.replace_late_bound_regions_with_fresh_var(&upcast_poly_trait_ref); debug!("original_poly_trait_ref={:?} upcast_trait_ref={:?} target_trait={:?}", original_poly_trait_ref, upcast_trait_ref, trait_def_id); - upcast_trait_ref.substs.clone() + upcast_trait_ref.substs }) } @@ -249,35 +244,36 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { let impl_trait_ref = self.instantiate_type_scheme( self.span, - &impl_polytype.substs, + impl_polytype.substs, &self.tcx.impl_trait_ref(impl_def_id).unwrap()); - impl_trait_ref.substs.clone() + impl_trait_ref.substs } probe::TraitPick => { let trait_def_id = pick.item.container().id(); - let trait_def = self.tcx.lookup_trait_def(trait_def_id); // Make a trait reference `$0 : Trait<$1...$n>` // consisting entirely of type variables. Later on in // the process we will unify the transformed-self-type // of the method with the actual type in order to // unify some of these variables. - self.fresh_substs_for_trait(self.span, - &trait_def.generics, - self.next_ty_var()) + self.fresh_substs_for_item(self.span, trait_def_id) } probe::WhereClausePick(ref poly_trait_ref) => { // Where clauses can have bound regions in them. We need to instantiate // those to convert from a poly-trait-ref to a trait-ref. 
- self.replace_late_bound_regions_with_fresh_var(&poly_trait_ref).substs.clone() + self.replace_late_bound_regions_with_fresh_var(&poly_trait_ref).substs } } } - fn extract_trait_ref(&mut self, self_ty: Ty<'tcx>, mut closure: F) -> R where - F: FnMut(&mut ConfirmContext<'a, 'gcx, 'tcx>, Ty<'tcx>, &ty::TraitTy<'tcx>) -> R, + fn extract_existential_trait_ref(&mut self, + self_ty: Ty<'tcx>, + mut closure: F) -> R + where F: FnMut(&mut ConfirmContext<'a, 'gcx, 'tcx>, + Ty<'tcx>, + ty::PolyExistentialTraitRef<'tcx>) -> R, { // If we specified that this is an object method, then the // self-type ought to be something that can be dereferenced to @@ -288,7 +284,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { self.fcx.autoderef(self.span, self_ty) .filter_map(|(ty, _)| { match ty.sty { - ty::TyTrait(ref data) => Some(closure(self, ty, &data)), + ty::TyTrait(ref data) => Some(closure(self, ty, data.principal)), _ => None, } }) @@ -303,59 +299,52 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { fn instantiate_method_substs(&mut self, pick: &probe::Pick<'tcx>, - supplied_method_types: Vec>, - substs: subst::Substs<'tcx>) - -> subst::Substs<'tcx> + mut supplied_method_types: Vec>, + substs: &Substs<'tcx>) + -> &'tcx Substs<'tcx> { // Determine the values for the generic parameters of the method. // If they were not explicitly supplied, just construct fresh // variables. let num_supplied_types = supplied_method_types.len(); let method = pick.item.as_opt_method().unwrap(); - let method_types = method.generics.types.get_slice(subst::FnSpace); - let num_method_types = method_types.len(); - + let num_method_types = method.generics.types.len(); + + if num_supplied_types > 0 && num_supplied_types != num_method_types { + if num_method_types == 0 { + span_err!(self.tcx.sess, self.span, E0035, + "does not take type parameters"); + } else { + span_err!(self.tcx.sess, self.span, E0036, + "incorrect number of type parameters given for this method: \ + expected {}, found {}", + num_method_types, num_supplied_types); + } + supplied_method_types = vec![self.tcx.types.err; num_method_types]; + } // Create subst for early-bound lifetime parameters, combining // parameters from the type and those from the method. 
// // FIXME -- permit users to manually specify lifetimes - let method_regions = - self.region_vars_for_defs( - self.span, - pick.item.as_opt_method().unwrap() - .generics.regions.get_slice(subst::FnSpace)); - - let subst::Substs { types, regions } = substs; - let regions = regions.with_slice(subst::FnSpace, &method_regions); - let mut final_substs = subst::Substs { types: types, regions: regions }; - - if num_supplied_types == 0 { - self.type_vars_for_defs( - self.span, - subst::FnSpace, - &mut final_substs, - method_types); - } else if num_method_types == 0 { - span_err!(self.tcx.sess, self.span, E0035, - "does not take type parameters"); - self.type_vars_for_defs( - self.span, - subst::FnSpace, - &mut final_substs, - method_types); - } else if num_supplied_types != num_method_types { - span_err!(self.tcx.sess, self.span, E0036, - "incorrect number of type parameters given for this method: expected {}, found {}", - num_method_types, num_supplied_types); - final_substs.types.replace( - subst::FnSpace, - vec![self.tcx.types.err; num_method_types]); - } else { - final_substs.types.replace(subst::FnSpace, supplied_method_types); - } - - return final_substs; + let supplied_start = substs.params().len() + method.generics.regions.len(); + Substs::for_item(self.tcx, method.def_id, |def, _| { + let i = def.index as usize; + if i < substs.params().len() { + substs.region_at(i) + } else { + self.region_var_for_def(self.span, def) + } + }, |def, cur_substs| { + let i = def.index as usize; + if i < substs.params().len() { + substs.type_at(i) + } else if supplied_method_types.is_empty() { + self.type_var_for_def(self.span, def, cur_substs) + } else { + supplied_method_types[i - supplied_start] + } + }) } fn unify_receivers(&mut self, @@ -382,7 +371,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { fn instantiate_method_sig(&mut self, pick: &probe::Pick<'tcx>, - all_substs: subst::Substs<'tcx>) + all_substs: &'tcx Substs<'tcx>) -> InstantiatedMethodSig<'tcx> { debug!("instantiate_method_sig(pick={:?}, all_substs={:?})", @@ -393,7 +382,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { // type/early-bound-regions substitutions performed. There can // be no late-bound regions appearing here. 
let method_predicates = pick.item.as_opt_method().unwrap() - .predicates.instantiate(self.tcx, &all_substs); + .predicates.instantiate(self.tcx, all_substs); let method_predicates = self.normalize_associated_types_in(self.span, &method_predicates); @@ -411,20 +400,19 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { debug!("late-bound lifetimes from method instantiated, method_sig={:?}", method_sig); - let method_sig = self.instantiate_type_scheme(self.span, &all_substs, &method_sig); + let method_sig = self.instantiate_type_scheme(self.span, all_substs, &method_sig); debug!("type scheme substituted, method_sig={:?}", method_sig); InstantiatedMethodSig { method_sig: method_sig, - all_substs: all_substs, method_predicates: method_predicates, } } fn add_obligations(&mut self, fty: Ty<'tcx>, - all_substs: &subst::Substs<'tcx>, + all_substs: &Substs<'tcx>, method_predicates: &ty::InstantiatedPredicates<'tcx>) { debug!("add_obligations: fty={:?} all_substs={:?} method_predicates={:?}", fty, @@ -484,7 +472,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { i, expr, autoderef_count); if autoderef_count > 0 { - let mut autoderef = self.autoderef(expr.span, self.expr_ty(expr)); + let mut autoderef = self.autoderef(expr.span, self.node_ty(expr.id)); autoderef.nth(autoderef_count).unwrap_or_else(|| { span_bug!(expr.span, "expr was deref-able {} times but now isn't?", autoderef_count); @@ -513,7 +501,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { assert!(adr.unsize.is_none()); (adr.autoderefs, None) } - Some(AutoPtr(_, _)) => { + Some(AutoPtr(..)) => { (adr.autoderefs, adr.unsize.map(|target| { target.builtin_deref(false, NoPreference) .expect("fixup: AutoPtr is not &T").ty @@ -544,7 +532,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { unsize: None }))), false) }; - let index_expr_ty = self.expr_ty(&index_expr); + let index_expr_ty = self.node_ty(index_expr.id); let result = self.try_index_step( ty::MethodCall::expr(expr.id), @@ -559,7 +547,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { if let Some((input_ty, return_ty)) = result { self.demand_suptype(index_expr.span, input_ty, index_expr_ty); - let expr_ty = self.expr_ty(&expr); + let expr_ty = self.node_ty(expr.id); self.demand_suptype(expr.span, expr_ty, return_ty); } } @@ -570,7 +558,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { if self.tables.borrow().method_map.contains_key(&method_call) { let method = self.try_overloaded_deref(expr.span, Some(&base_expr), - self.expr_ty(&base_expr), + self.node_ty(base_expr.id), PreferMutLvalue); let method = method.expect("re-trying deref failed"); self.tables.borrow_mut().method_map.insert(method_call, method); diff --git a/src/librustc_typeck/check/method/mod.rs b/src/librustc_typeck/check/method/mod.rs index e6401be5b3..73caf79c9f 100644 --- a/src/librustc_typeck/check/method/mod.rs +++ b/src/librustc_typeck/check/method/mod.rs @@ -13,7 +13,7 @@ use check::FnCtxt; use hir::def::Def; use hir::def_id::DefId; -use rustc::ty::subst; +use rustc::ty::subst::Substs; use rustc::traits; use rustc::ty::{self, ToPredicate, ToPolyTraitRef, TraitRef, TypeFoldable}; use rustc::ty::adjustment::{AdjustDerefRef, AutoDerefRef, AutoPtr}; @@ -182,31 +182,25 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let trait_def = self.tcx.lookup_trait_def(trait_def_id); - let type_parameter_defs = trait_def.generics.types.get_slice(subst::TypeSpace); - let expected_number_of_input_types = type_parameter_defs.len(); - - 
assert_eq!(trait_def.generics.types.len(subst::FnSpace), 0); + if let Some(ref input_types) = opt_input_types { + assert_eq!(trait_def.generics.types.len() - 1, input_types.len()); + } assert!(trait_def.generics.regions.is_empty()); // Construct a trait-reference `self_ty : Trait` - let mut substs = subst::Substs::new_trait(Vec::new(), Vec::new(), self_ty); - - match opt_input_types { - Some(input_types) => { - assert_eq!(expected_number_of_input_types, input_types.len()); - substs.types.replace(subst::ParamSpace::TypeSpace, input_types); + let substs = Substs::for_item(self.tcx, trait_def_id, |def, _| { + self.region_var_for_def(span, def) + }, |def, substs| { + if def.index == 0 { + self_ty + } else if let Some(ref input_types) = opt_input_types { + input_types[def.index as usize - 1] + } else { + self.type_var_for_def(span, def, substs) } + }); - None => { - self.type_vars_for_defs( - span, - subst::ParamSpace::TypeSpace, - &mut substs, - type_parameter_defs); - } - } - - let trait_ref = ty::TraitRef::new(trait_def_id, self.tcx.mk_substs(substs)); + let trait_ref = ty::TraitRef::new(trait_def_id, substs); // Construct an obligation let poly_trait_ref = trait_ref.to_poly_trait_ref(); @@ -224,10 +218,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Trait must have a method named `m_name` and it should not have // type parameters or early-bound regions. let tcx = self.tcx; - let method_item = self.trait_item(trait_def_id, m_name).unwrap(); + let method_item = self.impl_or_trait_item(trait_def_id, m_name).unwrap(); let method_ty = method_item.as_opt_method().unwrap(); - assert_eq!(method_ty.generics.types.len(subst::FnSpace), 0); - assert_eq!(method_ty.generics.regions.len(subst::FnSpace), 0); + assert_eq!(method_ty.generics.types.len(), 0); + assert_eq!(method_ty.generics.regions.len(), 0); debug!("lookup_in_trait_adjusted: method_item={:?} method_ty={:?}", method_item, method_ty); @@ -365,29 +359,16 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { Ok(def) } - /// Find item with name `item_name` defined in `trait_def_id` - /// and return it, or `None`, if no such item. - pub fn trait_item(&self, - trait_def_id: DefId, - item_name: ast::Name) - -> Option> - { - let trait_items = self.tcx.trait_items(trait_def_id); - trait_items.iter() - .find(|item| item.name() == item_name) - .cloned() - } - - pub fn impl_item(&self, - impl_def_id: DefId, - item_name: ast::Name) - -> Option> + /// Find item with name `item_name` defined in impl/trait `def_id` + /// and return it, or `None`, if no such item was defined there. 
+ pub fn impl_or_trait_item(&self, + def_id: DefId, + item_name: ast::Name) + -> Option> { - let impl_items = self.tcx.impl_items.borrow(); - let impl_items = impl_items.get(&impl_def_id).unwrap(); - impl_items + self.tcx.impl_or_trait_items(def_id) .iter() - .map(|&did| self.tcx.impl_or_trait_item(did.def_id())) + .map(|&did| self.tcx.impl_or_trait_item(did)) .find(|m| m.name() == item_name) } } diff --git a/src/librustc_typeck/check/method/probe.rs b/src/librustc_typeck/check/method/probe.rs index 99f1b13d4e..9fba9bcb75 100644 --- a/src/librustc_typeck/check/method/probe.rs +++ b/src/librustc_typeck/check/method/probe.rs @@ -16,15 +16,14 @@ use super::suggest; use check::{FnCtxt}; use hir::def_id::DefId; use hir::def::Def; -use rustc::ty::subst; -use rustc::ty::subst::Subst; +use rustc::ty::subst::{Subst, Substs}; use rustc::traits; use rustc::ty::{self, Ty, ToPolyTraitRef, TraitRef, TypeFoldable}; use rustc::infer::{InferOk, TypeOrigin}; +use rustc::util::nodemap::FnvHashSet; use syntax::ast; use syntax_pos::{Span, DUMMY_SP}; use rustc::hir; -use std::collections::HashSet; use std::mem; use std::ops::Deref; use std::rc::Rc; @@ -41,7 +40,7 @@ struct ProbeContext<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { opt_simplified_steps: Option>, inherent_candidates: Vec>, extension_candidates: Vec>, - impl_dups: HashSet, + impl_dups: FnvHashSet, import_id: Option, /// Collects near misses when the candidate functions are missing a `self` keyword and is only @@ -80,9 +79,9 @@ struct Candidate<'tcx> { #[derive(Debug)] enum CandidateKind<'tcx> { - InherentImplCandidate(subst::Substs<'tcx>, + InherentImplCandidate(&'tcx Substs<'tcx>, /* Normalize obligations */ Vec>), - ExtensionImplCandidate(/* Impl */ DefId, subst::Substs<'tcx>, + ExtensionImplCandidate(/* Impl */ DefId, &'tcx Substs<'tcx>, /* Normalize obligations */ Vec>), ObjectCandidate, TraitCandidate, @@ -256,7 +255,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { item_name: item_name, inherent_candidates: Vec::new(), extension_candidates: Vec::new(), - impl_dups: HashSet::new(), + impl_dups: FnvHashSet(), import_id: None, steps: Rc::new(steps), opt_simplified_steps: opt_simplified_steps, @@ -290,11 +289,10 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { match self_ty.sty { ty::TyTrait(box ref data) => { - self.assemble_inherent_candidates_from_object(self_ty, data); - self.assemble_inherent_impl_candidates_for_type(data.principal_def_id()); + self.assemble_inherent_candidates_from_object(self_ty, data.principal); + self.assemble_inherent_impl_candidates_for_type(data.principal.def_id()); } - ty::TyEnum(def, _) | - ty::TyStruct(def, _) => { + ty::TyAdt(def, _) => { self.assemble_inherent_impl_candidates_for_type(def.did); } ty::TyBox(_) => { @@ -405,7 +403,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { debug!("assemble_inherent_impl_probe {:?}", impl_def_id); - let item = match self.impl_item(impl_def_id) { + let item = match self.impl_or_trait_item(impl_def_id) { Some(m) => m, None => { return; } // No method with correct name on this impl }; @@ -421,10 +419,10 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { } let (impl_ty, impl_substs) = self.impl_ty_and_substs(impl_def_id); - let impl_ty = impl_ty.subst(self.tcx, &impl_substs); + let impl_ty = impl_ty.subst(self.tcx, impl_substs); // Determine the receiver type that the method itself expects. 
- let xform_self_ty = self.xform_self_ty(&item, impl_ty, &impl_substs); + let xform_self_ty = self.xform_self_ty(&item, impl_ty, impl_substs); // We can't use normalize_associated_types_in as it will pollute the // fcx's fulfillment context after this probe is over. @@ -445,7 +443,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { fn assemble_inherent_candidates_from_object(&mut self, self_ty: Ty<'tcx>, - data: &ty::TraitTy<'tcx>) { + principal: ty::PolyExistentialTraitRef<'tcx>) { debug!("assemble_inherent_candidates_from_object(self_ty={:?})", self_ty); @@ -456,7 +454,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { // a substitution that replaces `Self` with the object type // itself. Hence, a `&self` method will wind up with an // argument type like `&Trait`. - let trait_ref = data.principal_trait_ref_with_self_ty(self.tcx, self_ty); + let trait_ref = principal.with_self_ty(self.tcx, self_ty); self.elaborate_bounds(&[trait_ref], |this, new_trait_ref, item| { let new_trait_ref = this.erase_late_bound_regions(&new_trait_ref); @@ -497,7 +495,6 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { ty::Predicate::WellFormed(..) | ty::Predicate::ObjectSafe(..) | ty::Predicate::ClosureKind(..) | - ty::Predicate::Rfc1592(..) | ty::Predicate::TypeOutlives(..) => { None } @@ -519,14 +516,10 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { trait_ref, trait_ref.substs, m); - assert_eq!(m.generics.types.get_slice(subst::TypeSpace).len(), - trait_ref.substs.types.get_slice(subst::TypeSpace).len()); - assert_eq!(m.generics.regions.get_slice(subst::TypeSpace).len(), - trait_ref.substs.regions.get_slice(subst::TypeSpace).len()); - assert_eq!(m.generics.types.get_slice(subst::SelfSpace).len(), - trait_ref.substs.types.get_slice(subst::SelfSpace).len()); - assert_eq!(m.generics.regions.get_slice(subst::SelfSpace).len(), - trait_ref.substs.regions.get_slice(subst::SelfSpace).len()); + assert_eq!(m.generics.parent_types as usize, + trait_ref.substs.types().count()); + assert_eq!(m.generics.parent_regions as usize, + trait_ref.substs.regions().count()); } // Because this trait derives from a where-clause, it @@ -534,7 +527,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { // artifacts. This means it is safe to put into the // `WhereClauseCandidate` and (eventually) into the // `WhereClausePick`. 
- assert!(!trait_ref.substs.types.needs_infer()); + assert!(!trait_ref.substs.needs_infer()); this.inherent_candidates.push(Candidate { xform_self_ty: xform_self_ty, @@ -562,7 +555,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { let tcx = self.tcx; for bound_trait_ref in traits::transitive_bounds(tcx, bounds) { - let item = match self.trait_item(bound_trait_ref.def_id()) { + let item = match self.impl_or_trait_item(bound_trait_ref.def_id()) { Some(v) => v, None => { continue; } }; @@ -579,7 +572,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { expr_id: ast::NodeId) -> Result<(), MethodError<'tcx>> { - let mut duplicates = HashSet::new(); + let mut duplicates = FnvHashSet(); let opt_applicable_traits = self.tcx.trait_map.get(&expr_id); if let Some(applicable_traits) = opt_applicable_traits { for trait_candidate in applicable_traits { @@ -596,7 +589,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { } fn assemble_extension_candidates_for_all_traits(&mut self) -> Result<(), MethodError<'tcx>> { - let mut duplicates = HashSet::new(); + let mut duplicates = FnvHashSet(); for trait_info in suggest::all_traits(self.ccx) { if duplicates.insert(trait_info.def_id) { self.assemble_extension_candidates_for_trait(trait_info.def_id)?; @@ -665,7 +658,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { let impl_trait_ref = self.tcx.impl_trait_ref(impl_def_id) .unwrap() // we know this is a trait impl - .subst(self.tcx, &impl_substs); + .subst(self.tcx, impl_substs); debug!("impl_trait_ref={:?}", impl_trait_ref); @@ -753,14 +746,19 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { // for the purposes of our method lookup, we only take // receiver type into account, so we can just substitute // fresh types here to use during substitution and subtyping. - let trait_def = self.tcx.lookup_trait_def(trait_def_id); - let substs = self.fresh_substs_for_trait(self.span, - &trait_def.generics, - step.self_ty); + let substs = Substs::for_item(self.tcx, trait_def_id, |def, _| { + self.region_var_for_def(self.span, def) + }, |def, substs| { + if def.index == 0 { + step.self_ty + } else { + self.type_var_for_def(self.span, def, substs) + } + }); let xform_self_ty = self.xform_self_ty(&item, step.self_ty, - &substs); + substs); self.inherent_candidates.push(Candidate { xform_self_ty: xform_self_ty, item: item.clone(), @@ -799,7 +797,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { let trait_predicates = self.tcx.lookup_predicates(def_id); let bounds = trait_predicates.instantiate(self.tcx, substs); - let predicates = bounds.predicates.into_vec(); + let predicates = bounds.predicates; debug!("assemble_projection_candidates: predicates={:?}", predicates); for poly_bound in @@ -1192,7 +1190,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { fn xform_self_ty(&self, item: &ty::ImplOrTraitItem<'tcx>, impl_ty: Ty<'tcx>, - substs: &subst::Substs<'tcx>) + substs: &Substs<'tcx>) -> Ty<'tcx> { match item.as_opt_method() { @@ -1205,7 +1203,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { fn xform_method_self_ty(&self, method: &Rc>, impl_ty: Ty<'tcx>, - substs: &subst::Substs<'tcx>) + substs: &Substs<'tcx>) -> Ty<'tcx> { debug!("xform_self_ty(impl_ty={:?}, self_ty={:?}, substs={:?})", @@ -1220,64 +1218,54 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { // are given do not include type/lifetime parameters for the // method yet. So create fresh variables here for those too, // if there are any. 
- assert_eq!(substs.types.len(subst::FnSpace), 0); - assert_eq!(substs.regions.len(subst::FnSpace), 0); + assert_eq!(substs.types().count(), method.generics.parent_types as usize); + assert_eq!(substs.regions().count(), method.generics.parent_regions as usize); if self.mode == Mode::Path { return impl_ty; } - let mut placeholder; - let mut substs = substs; - if - !method.generics.types.is_empty_in(subst::FnSpace) || - !method.generics.regions.is_empty_in(subst::FnSpace) - { - // In general, during probe we erase regions. See - // `impl_self_ty()` for an explanation. - let method_regions = - method.generics.regions.get_slice(subst::FnSpace) - .iter() - .map(|_| ty::ReErased) - .collect(); - - placeholder = (*substs).clone().with_method(Vec::new(), method_regions); - - self.type_vars_for_defs( - self.span, - subst::FnSpace, - &mut placeholder, - method.generics.types.get_slice(subst::FnSpace)); - - substs = &placeholder; - } - // Erase any late-bound regions from the method and substitute // in the values from the substitution. let xform_self_ty = method.fty.sig.input(0); let xform_self_ty = self.erase_late_bound_regions(&xform_self_ty); - let xform_self_ty = xform_self_ty.subst(self.tcx, substs); - xform_self_ty + if method.generics.types.is_empty() && method.generics.regions.is_empty() { + xform_self_ty.subst(self.tcx, substs) + } else { + let substs = Substs::for_item(self.tcx, method.def_id, |def, _| { + let i = def.index as usize; + if i < substs.params().len() { + substs.region_at(i) + } else { + // In general, during probe we erase regions. See + // `impl_self_ty()` for an explanation. + self.tcx.mk_region(ty::ReErased) + } + }, |def, cur_substs| { + let i = def.index as usize; + if i < substs.params().len() { + substs.type_at(i) + } else { + self.type_var_for_def(self.span, def, cur_substs) + } + }); + xform_self_ty.subst(self.tcx, substs) + } } /// Get the type of an impl and generate substitutions with placeholders. fn impl_ty_and_substs(&self, impl_def_id: DefId) - -> (Ty<'tcx>, subst::Substs<'tcx>) + -> (Ty<'tcx>, &'tcx Substs<'tcx>) { - let impl_pty = self.tcx.lookup_item_type(impl_def_id); + let impl_ty = self.tcx.lookup_item_type(impl_def_id).ty; - let type_vars = - impl_pty.generics.types.map( - |_| self.next_ty_var()); + let substs = Substs::for_item(self.tcx, impl_def_id, + |_, _| self.tcx.mk_region(ty::ReErased), + |_, _| self.next_ty_var()); - let region_placeholders = - impl_pty.generics.regions.map( - |_| ty::ReErased); // see erase_late_bound_regions() for an expl of why 'erased - - let substs = subst::Substs::new(type_vars, region_placeholders); - (impl_pty.ty, substs) + (impl_ty, substs) } /// Replace late-bound-regions bound by `value` with `'static` using @@ -1304,18 +1292,12 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { self.tcx.erase_late_bound_regions(value) } - fn impl_item(&self, impl_def_id: DefId) - -> Option> - { - self.fcx.impl_item(impl_def_id, self.item_name) - } - - /// Find item with name `item_name` defined in `trait_def_id` - /// and return it, or `None`, if no such item. - fn trait_item(&self, trait_def_id: DefId) - -> Option> + /// Find item with name `item_name` defined in impl/trait `def_id` + /// and return it, or `None`, if no such item was defined there. 
+ fn impl_or_trait_item(&self, def_id: DefId) + -> Option> { - self.fcx.trait_item(trait_def_id, self.item_name) + self.fcx.impl_or_trait_item(def_id, self.item_name) } } @@ -1324,8 +1306,8 @@ impl<'tcx> Candidate<'tcx> { Pick { item: self.item.clone(), kind: match self.kind { - InherentImplCandidate(_, _) => InherentImplPick, - ExtensionImplCandidate(def_id, _, _) => { + InherentImplCandidate(..) => InherentImplPick, + ExtensionImplCandidate(def_id, ..) => { ExtensionImplPick(def_id) } ObjectCandidate => ObjectPick, @@ -1336,7 +1318,7 @@ impl<'tcx> Candidate<'tcx> { // inference variables or other artifacts. This // means they are safe to put into the // `WhereClausePick`. - assert!(!trait_ref.substs().types.needs_infer()); + assert!(!trait_ref.substs().needs_infer()); WhereClausePick(trait_ref.clone()) } @@ -1350,10 +1332,10 @@ impl<'tcx> Candidate<'tcx> { fn to_source(&self) -> CandidateSource { match self.kind { - InherentImplCandidate(_, _) => { + InherentImplCandidate(..) => { ImplSource(self.item.container().id()) } - ExtensionImplCandidate(def_id, _, _) => ImplSource(def_id), + ExtensionImplCandidate(def_id, ..) => ImplSource(def_id), ObjectCandidate | TraitCandidate | WhereClauseCandidate(_) => TraitSource(self.item.container().id()), diff --git a/src/librustc_typeck/check/method/suggest.rs b/src/librustc_typeck/check/method/suggest.rs index 5452178247..34bcd2ba04 100644 --- a/src/librustc_typeck/check/method/suggest.rs +++ b/src/librustc_typeck/check/method/suggest.rs @@ -16,9 +16,8 @@ use CrateCtxt; use check::{FnCtxt}; use rustc::hir::map as hir_map; use rustc::ty::{self, Ty, ToPolyTraitRef, ToPredicate, TypeFoldable}; -use middle::cstore; use hir::def::Def; -use hir::def_id::DefId; +use hir::def_id::{CRATE_DEF_INDEX, DefId}; use middle::lang_items::FnOnceTraitLangItem; use rustc::ty::subst::Substs; use rustc::traits::{Obligation, SelectionContext}; @@ -54,10 +53,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.autoderef(span, ty).any(|(ty, _)| self.probe(|_| { let fn_once_substs = - Substs::new_trait(vec![self.next_ty_var()], vec![], ty); - let trait_ref = - ty::TraitRef::new(fn_once, - tcx.mk_substs(fn_once_substs)); + Substs::new_trait(tcx, ty, &[self.next_ty_var()]); + let trait_ref = ty::TraitRef::new(fn_once, fn_once_substs); let poly_trait_ref = trait_ref.to_poly_trait_ref(); let obligation = Obligation::misc(span, self.body_id, @@ -94,9 +91,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { CandidateSource::ImplSource(impl_did) => { // Provide the best span we can. Use the item, if local to crate, else // the impl, if local to crate (item may be defaulted), else nothing. - let item = self.impl_item(impl_did, item_name) + let item = self.impl_or_trait_item(impl_did, item_name) .or_else(|| { - self.trait_item( + self.impl_or_trait_item( self.tcx.impl_trait_ref(impl_did).unwrap().def_id, item_name @@ -129,7 +126,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } CandidateSource::TraitSource(trait_did) => { - let item = self.trait_item(trait_did, item_name).unwrap(); + let item = self.impl_or_trait_item(trait_did, item_name).unwrap(); let item_span = self.tcx.map.def_id_span(item.def_id(), span); span_note!(err, item_span, "candidate #{} is defined in the trait `{}`", @@ -166,30 +163,34 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // give a helping note that it has to be called as (x.f)(...). 
if let Some(expr) = rcvr_expr { for (ty, _) in self.autoderef(span, rcvr_ty) { - if let ty::TyStruct(def, substs) = ty.sty { - if let Some(field) = def.struct_variant().find_field_named(item_name) { - let snippet = tcx.sess.codemap().span_to_snippet(expr.span); - let expr_string = match snippet { - Ok(expr_string) => expr_string, - _ => "s".into() // Default to a generic placeholder for the - // expression when we can't generate a - // string snippet - }; - - let field_ty = field.ty(tcx, substs); - - if self.is_fn_ty(&field_ty, span) { - err.span_note(span, &format!( - "use `({0}.{1})(...)` if you meant to call the function \ - stored in the `{1}` field", - expr_string, item_name)); - } else { - err.span_note(span, &format!( - "did you mean to write `{0}.{1}`?", - expr_string, item_name)); + match ty.sty { + ty::TyAdt(def, substs) if !def.is_enum() => { + if let Some(field) = def.struct_variant(). + find_field_named(item_name) { + let snippet = tcx.sess.codemap().span_to_snippet(expr.span); + let expr_string = match snippet { + Ok(expr_string) => expr_string, + _ => "s".into() // Default to a generic placeholder for the + // expression when we can't generate a + // string snippet + }; + + let field_ty = field.ty(tcx, substs); + + if self.is_fn_ty(&field_ty, span) { + err.span_note(span, &format!( + "use `({0}.{1})(...)` if you meant to call the \ + function stored in the `{1}` field", + expr_string, item_name)); + } else { + err.span_note(span, &format!( + "did you mean to write `{0}.{1}`?", + expr_string, item_name)); + } + break; } - break; } + _ => {} } } } @@ -244,6 +245,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { MethodError::Ambiguity(sources) => { let mut err = struct_span_err!(self.sess(), span, E0034, "multiple applicable items in scope"); + err.span_label(span, &format!("multiple `{}` found", item_name)); report_candidates(&mut err, sources); err.emit(); @@ -318,7 +320,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // implementing a trait would be legal but is rejected // here). 
(type_is_local || info.def_id.is_local()) - && self.trait_item(info.def_id, item_name).is_some() + && self.impl_or_trait_item(info.def_id, item_name).is_some() }) .collect::>(); @@ -356,9 +358,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { rcvr_expr: Option<&hir::Expr>) -> bool { fn is_local(ty: Ty) -> bool { match ty.sty { - ty::TyEnum(def, _) | ty::TyStruct(def, _) => def.did.is_local(), + ty::TyAdt(def, _) => def.did.is_local(), - ty::TyTrait(ref tr) => tr.principal_def_id().is_local(), + ty::TyTrait(ref tr) => tr.principal.def_id().is_local(), ty::TyParam(_) => true, @@ -446,34 +448,30 @@ pub fn all_traits<'a>(ccx: &'a CrateCtxt) -> AllTraits<'a> { // Cross-crate: let mut external_mods = FnvHashSet(); - fn handle_external_def(traits: &mut AllTraitsVec, + fn handle_external_def(ccx: &CrateCtxt, + traits: &mut AllTraitsVec, external_mods: &mut FnvHashSet, - ccx: &CrateCtxt, - cstore: &for<'a> cstore::CrateStore<'a>, - dl: cstore::DefLike) { - match dl { - cstore::DlDef(Def::Trait(did)) => { - traits.push(TraitInfo::new(did)); + def_id: DefId) { + match ccx.tcx.sess.cstore.describe_def(def_id) { + Some(Def::Trait(_)) => { + traits.push(TraitInfo::new(def_id)); } - cstore::DlDef(Def::Mod(did)) => { - if !external_mods.insert(did) { + Some(Def::Mod(_)) => { + if !external_mods.insert(def_id) { return; } - for child in cstore.item_children(did) { - handle_external_def(traits, external_mods, - ccx, cstore, child.def) + for child in ccx.tcx.sess.cstore.item_children(def_id) { + handle_external_def(ccx, traits, external_mods, child.def_id) } } _ => {} } } - let cstore = &*ccx.tcx.sess.cstore; - for cnum in ccx.tcx.sess.cstore.crates() { - for child in cstore.crate_top_level_items(cnum) { - handle_external_def(&mut traits, &mut external_mods, - ccx, cstore, child.def) - } + handle_external_def(ccx, &mut traits, &mut external_mods, DefId { + krate: cnum, + index: CRATE_DEF_INDEX + }); } *ccx.all_traits.borrow_mut() = Some(traits); diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 5dae28c12a..8151849613 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -83,14 +83,12 @@ use self::TupleArgumentsFlag::*; use astconv::{AstConv, ast_region_to_region, PathParamMode}; use dep_graph::DepNode; use fmt_macros::{Parser, Piece, Position}; -use middle::cstore::LOCAL_CRATE; use hir::def::{Def, PathResolution}; -use hir::def_id::DefId; +use hir::def_id::{DefId, LOCAL_CRATE}; use hir::pat_util; use rustc::infer::{self, InferCtxt, InferOk, TypeOrigin, TypeTrace, type_variable}; -use rustc::ty::subst::{self, Subst, Substs, VecPerParamSpace, ParamSpace}; +use rustc::ty::subst::{Subst, Substs}; use rustc::traits::{self, Reveal}; -use rustc::ty::{GenericPredicates, TypeScheme}; use rustc::ty::{ParamTy, ParameterEnvironment}; use rustc::ty::{LvaluePreference, NoPreference, PreferMutLvalue}; use rustc::ty::{self, ToPolyTraitRef, Ty, TyCtxt, Visibility}; @@ -105,23 +103,20 @@ use CrateCtxt; use TypeAndSubsts; use lint; use util::common::{block_query, ErrorReported, indenter, loop_query}; -use util::nodemap::{DefIdMap, FnvHashMap, NodeMap}; +use util::nodemap::{DefIdMap, FnvHashMap, FnvHashSet, NodeMap}; use std::cell::{Cell, Ref, RefCell}; -use std::collections::{HashSet}; use std::mem::replace; use std::ops::Deref; use syntax::abi::Abi; use syntax::ast; use syntax::attr; -use syntax::attr::AttrMetaMethods; use syntax::codemap::{self, Spanned}; use syntax::feature_gate::{GateIssue, emit_feature_err}; use syntax::parse::token::{self, 
InternedString, keywords}; use syntax::ptr::P; use syntax::util::lev_distance::find_best_match_for_name; use syntax_pos::{self, Span}; -use errors::DiagnosticBuilder; use rustc::hir::intravisit::{self, Visitor}; use rustc::hir::{self, PatKind}; @@ -508,10 +503,6 @@ pub fn check_item_bodies(ccx: &CrateCtxt) -> CompileResult { if let Err(errors) = fulfillment_cx.select_all_or_error(&infcx) { infcx.report_fulfillment_errors(&errors); } - - if let Err(errors) = fulfillment_cx.select_rfc1592_obligations(&infcx) { - infcx.report_fulfillment_errors_as_warnings(&errors, item_id); - } }); } }) @@ -543,7 +534,7 @@ fn check_bare_fn<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, fn_id: ast::NodeId) { let raw_fty = ccx.tcx.lookup_item_type(ccx.tcx.map.local_def_id(fn_id)).ty; let fn_ty = match raw_fty.sty { - ty::TyFnDef(_, _, f) => f, + ty::TyFnDef(.., f) => f, _ => span_bug!(body.span, "check_bare_fn: function type expected") }; @@ -720,16 +711,18 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, fcx } -pub fn check_struct(ccx: &CrateCtxt, id: ast::NodeId, span: Span) { - let tcx = ccx.tcx; - - check_representable(tcx, span, id, "struct"); +fn check_struct(ccx: &CrateCtxt, id: ast::NodeId, span: Span) { + check_representable(ccx.tcx, span, id); - if tcx.lookup_simd(ccx.tcx.map.local_def_id(id)) { - check_simd(tcx, span, id); + if ccx.tcx.lookup_simd(ccx.tcx.map.local_def_id(id)) { + check_simd(ccx.tcx, span, id); } } +fn check_union(ccx: &CrateCtxt, id: ast::NodeId, span: Span) { + check_representable(ccx.tcx, span, id); +} + pub fn check_item_type<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, it: &'tcx hir::Item) { debug!("check_item_type(it.id={}, it.name={})", it.id, @@ -737,7 +730,7 @@ pub fn check_item_type<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, it: &'tcx hir::Item) { let _indenter = indenter(); match it.node { // Consts can play a role in type-checking, so they are included here. - hir::ItemStatic(_, _, ref e) | + hir::ItemStatic(.., ref e) | hir::ItemConst(_, ref e) => check_const(ccx, &e, it.id), hir::ItemEnum(ref enum_definition, _) => { check_enum_variants(ccx, @@ -746,38 +739,35 @@ pub fn check_item_type<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, it: &'tcx hir::Item) { it.id); } hir::ItemFn(..) => {} // entirely within check_item_body - hir::ItemImpl(_, _, _, _, _, ref impl_items) => { + hir::ItemImpl(.., ref impl_items) => { debug!("ItemImpl {} with id {}", it.name, it.id); let impl_def_id = ccx.tcx.map.local_def_id(it.id); match ccx.tcx.impl_trait_ref(impl_def_id) { Some(impl_trait_ref) => { - let trait_def_id = impl_trait_ref.def_id; - check_impl_items_against_trait(ccx, it.span, impl_def_id, &impl_trait_ref, impl_items); - check_on_unimplemented( - ccx, - &ccx.tcx.lookup_trait_def(trait_def_id).generics, - it, - ccx.tcx.item_name(trait_def_id)); + let trait_def_id = impl_trait_ref.def_id; + check_on_unimplemented(ccx, trait_def_id, it); } None => { } } } hir::ItemTrait(..) => { let def_id = ccx.tcx.map.local_def_id(it.id); - let generics = &ccx.tcx.lookup_trait_def(def_id).generics; - check_on_unimplemented(ccx, generics, it, it.name); + check_on_unimplemented(ccx, def_id, it); } hir::ItemStruct(..) => { check_struct(ccx, it.id, it.span); } + hir::ItemUnion(..) 
=> { + check_union(ccx, it.id, it.span); + } hir::ItemTy(_, ref generics) => { let pty_ty = ccx.tcx.node_id_to_type(it.id); - check_bounds_are_used(ccx, &generics.ty_params, pty_ty); + check_bounds_are_used(ccx, generics, pty_ty); } hir::ItemForeignMod(ref m) => { if m.abi == Abi::RustIntrinsic { @@ -816,10 +806,10 @@ pub fn check_item_body<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, it: &'tcx hir::Item) { ccx.tcx.item_path_str(ccx.tcx.map.local_def_id(it.id))); let _indenter = indenter(); match it.node { - hir::ItemFn(ref decl, _, _, _, _, ref body) => { + hir::ItemFn(ref decl, .., ref body) => { check_bare_fn(ccx, &decl, &body, it.id); } - hir::ItemImpl(_, _, _, _, _, ref impl_items) => { + hir::ItemImpl(.., ref impl_items) => { debug!("ItemImpl {} with id {}", it.name, it.id); for impl_item in impl_items { @@ -836,20 +826,16 @@ pub fn check_item_body<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, it: &'tcx hir::Item) { } } } - hir::ItemTrait(_, _, _, ref trait_items) => { + hir::ItemTrait(.., ref trait_items) => { for trait_item in trait_items { match trait_item.node { hir::ConstTraitItem(_, Some(ref expr)) => { check_const(ccx, &expr, trait_item.id) } hir::MethodTraitItem(ref sig, Some(ref body)) => { - check_trait_fn_not_const(ccx, trait_item.span, sig.constness); - check_bare_fn(ccx, &sig.decl, body, trait_item.id); } - hir::MethodTraitItem(ref sig, None) => { - check_trait_fn_not_const(ccx, trait_item.span, sig.constness); - } + hir::MethodTraitItem(_, None) | hir::ConstTraitItem(_, None) | hir::TypeTraitItem(..) => { // Nothing to do. @@ -861,26 +847,10 @@ pub fn check_item_body<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, it: &'tcx hir::Item) { } } -fn check_trait_fn_not_const<'a,'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - span: Span, - constness: hir::Constness) -{ - match constness { - hir::Constness::NotConst => { - // good - } - hir::Constness::Const => { - struct_span_err!(ccx.tcx.sess, span, E0379, "trait fns cannot be declared const") - .span_label(span, &format!("trait fns cannot be const")) - .emit() - } - } -} - fn check_on_unimplemented<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - generics: &ty::Generics, - item: &hir::Item, - name: ast::Name) { + def_id: DefId, + item: &hir::Item) { + let generics = ccx.tcx.lookup_generics(def_id); if let Some(ref attr) = item.attrs.iter().find(|a| { a.check_name("rustc_on_unimplemented") }) { @@ -899,6 +869,7 @@ fn check_on_unimplemented<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, }) { Some(_) => (), None => { + let name = ccx.tcx.item_name(def_id); span_err!(ccx.tcx.sess, attr.span, E0230, "there is no type parameter \ {} on trait {}", @@ -915,9 +886,12 @@ fn check_on_unimplemented<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, } } } else { - span_err!(ccx.tcx.sess, attr.span, E0232, - "this attribute must have a value, \ - eg `#[rustc_on_unimplemented = \"foo\"]`") + struct_span_err!( + ccx.tcx.sess, attr.span, E0232, + "this attribute must have a value") + .span_label(attr.span, &format!("attribute requires a value")) + .note(&format!("eg `#[rustc_on_unimplemented = \"foo\"]`")) + .emit(); } } } @@ -928,14 +902,17 @@ fn report_forbidden_specialization<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, { let mut err = struct_span_err!( tcx.sess, impl_item.span, E0520, - "item `{}` is provided by an `impl` that specializes \ - another, but the item in the parent `impl` is not \ - marked `default` and so it cannot be specialized.", + "`{}` specializes an item from a parent `impl`, but \ + that item is not marked `default`", impl_item.name); + err.span_label(impl_item.span, &format!("cannot specialize default 
item `{}`", + impl_item.name)); match tcx.span_of_impl(parent_impl) { Ok(span) => { - err.span_note(span, "parent implementation is here:"); + err.span_label(span, &"parent `impl` is here"); + err.note(&format!("to specialize, `{}` in the parent `impl` must be marked `default`", + impl_item.name)); } Err(cname) => { err.note(&format!("parent implementation is in crate `{}`", cname)); @@ -1025,26 +1002,26 @@ fn check_impl_items_against_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, // We can only get the spans from local trait definition // Same for E0324 and E0325 if let Some(trait_span) = tcx.map.span_if_local(ty_trait_item.def_id()) { - err.span_label(trait_span, &format!("original trait requirement")); + err.span_label(trait_span, &format!("item in trait")); } err.emit() } } - hir::ImplItemKind::Method(ref sig, ref body) => { - check_trait_fn_not_const(ccx, impl_item.span, sig.constness); - + hir::ImplItemKind::Method(_, ref body) => { let impl_method = match ty_impl_item { ty::MethodTraitItem(ref mti) => mti, _ => span_bug!(impl_item.span, "non-method impl-item for method") }; + let trait_span = tcx.map.span_if_local(ty_trait_item.def_id()); if let &ty::MethodTraitItem(ref trait_method) = ty_trait_item { compare_impl_method(ccx, &impl_method, impl_item.span, body.id, &trait_method, - &impl_trait_ref); + &impl_trait_ref, + trait_span); } else { let mut err = struct_span_err!(tcx.sess, impl_item.span, E0324, "item `{}` is an associated method, \ @@ -1053,7 +1030,7 @@ fn check_impl_items_against_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, impl_trait_ref); err.span_label(impl_item.span, &format!("does not match trait")); if let Some(trait_span) = tcx.map.span_if_local(ty_trait_item.def_id()) { - err.span_label(trait_span, &format!("original trait requirement")); + err.span_label(trait_span, &format!("item in trait")); } err.emit() } @@ -1076,7 +1053,7 @@ fn check_impl_items_against_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, impl_trait_ref); err.span_label(impl_item.span, &format!("does not match trait")); if let Some(trait_span) = tcx.map.span_if_local(ty_trait_item.def_id()) { - err.span_label(trait_span, &format!("original trait requirement")); + err.span_label(trait_span, &format!("item in trait")); } err.emit() } @@ -1198,10 +1175,10 @@ fn check_const<'a, 'tcx>(ccx: &CrateCtxt<'a,'tcx>, /// Checks whether a type can be represented in memory. In particular, it /// identifies types that contain themselves without indirection through a /// pointer, which would mean their size is unbounded. -pub fn check_representable<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - sp: Span, - item_id: ast::NodeId, - _designation: &str) -> bool { +fn check_representable<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + sp: Span, + item_id: ast::NodeId) + -> bool { let rty = tcx.node_id_to_type(item_id); // Check that it is possible to represent this type. 
This call identifies @@ -1223,7 +1200,7 @@ pub fn check_representable<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, pub fn check_simd<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, id: ast::NodeId) { let t = tcx.node_id_to_type(id); match t.sty { - ty::TyStruct(def, substs) => { + ty::TyAdt(def, substs) if def.is_struct() => { let fields = &def.struct_variant().fields; if fields.is_empty() { span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty"); @@ -1231,7 +1208,9 @@ pub fn check_simd<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, id: ast::Node } let e = fields[0].ty(tcx, substs); if !fields.iter().all(|f| f.ty(tcx, substs) == e) { - span_err!(tcx.sess, sp, E0076, "SIMD vector should be homogeneous"); + struct_span_err!(tcx.sess, sp, E0076, "SIMD vector should be homogeneous") + .span_label(sp, &format!("SIMD elements must have the same type")) + .emit(); return; } match e.sty { @@ -1257,8 +1236,11 @@ pub fn check_enum_variants<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, let hint = *ccx.tcx.lookup_repr_hints(def_id).get(0).unwrap_or(&attr::ReprAny); if hint != attr::ReprAny && vs.is_empty() { - span_err!(ccx.tcx.sess, sp, E0084, - "unsupported representation for zero-variant enum"); + struct_span_err!( + ccx.tcx.sess, sp, E0084, + "unsupported representation for zero-variant enum") + .span_label(sp, &format!("unsupported enum representation")) + .emit(); } let repr_type_ty = ccx.tcx.enum_repr_type(Some(&hint)).to_ty(ccx.tcx); @@ -1296,7 +1278,7 @@ pub fn check_enum_variants<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, disr_vals.push(current_disr_val); } - check_representable(ccx.tcx, sp, id, "enum"); + check_representable(ccx.tcx, sp, id); } impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { @@ -1306,6 +1288,12 @@ impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { &self.ast_ty_to_ty_cache } + fn get_generics(&self, _: Span, id: DefId) + -> Result<&'tcx ty::Generics<'tcx>, ErrorReported> + { + Ok(self.tcx().lookup_generics(id)) + } + fn get_item_type_scheme(&self, _: Span, id: DefId) -> Result, ErrorReported> { @@ -1339,7 +1327,7 @@ impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { .filter_map(|predicate| { match *predicate { ty::Predicate::Trait(ref data) => { - if data.0.self_ty().is_param(def.space, def.index) { + if data.0.self_ty().is_param(def.index) { Some(data.to_poly_trait_ref()) } else { None @@ -1359,31 +1347,23 @@ impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { assoc_name: ast::Name) -> bool { - let trait_def = self.tcx().lookup_trait_def(trait_def_id); - trait_def.associated_type_names.contains(&assoc_name) - } - - fn ty_infer(&self, - ty_param_def: Option>, - substs: Option<&mut subst::Substs<'tcx>>, - space: Option, - span: Span) -> Ty<'tcx> { - // Grab the default doing subsitution - let default = ty_param_def.and_then(|def| { - def.default.map(|ty| type_variable::Default { - ty: ty.subst_spanned(self.tcx(), substs.as_ref().unwrap(), Some(span)), - origin_span: span, - def_id: def.default_def_id - }) - }); + self.tcx().impl_or_trait_items(trait_def_id).iter().any(|&def_id| { + match self.tcx().impl_or_trait_item(def_id) { + ty::TypeTraitItem(ref item) => item.name == assoc_name, + _ => false + } + }) + } - let ty_var = self.next_ty_var_with_default(default); + fn ty_infer(&self, _span: Span) -> Ty<'tcx> { + self.next_ty_var() + } - // Finally we add the type variable to the substs - match substs { - None => ty_var, - Some(substs) => { substs.types.push(space.unwrap(), ty_var); ty_var } - } + fn 
ty_infer_for_def(&self, + ty_param_def: &ty::TypeParameterDef<'tcx>, + substs: &Substs<'tcx>, + span: Span) -> Ty<'tcx> { + self.type_var_for_def(span, ty_param_def, substs) } fn projected_ty_from_poly_trait_ref(&self, @@ -1428,13 +1408,13 @@ impl<'a, 'gcx, 'tcx> RegionScope for FnCtxt<'a, 'gcx, 'tcx> { // (and anyway, within a fn body the right region may not even // be something the user can write explicitly, since it might // be some expression). - self.next_region_var(infer::MiscVariable(span)) + *self.next_region_var(infer::MiscVariable(span)) } fn anon_regions(&self, span: Span, count: usize) -> Result, Option>> { Ok((0..count).map(|_| { - self.next_region_var(infer::MiscVariable(span)) + *self.next_region_var(infer::MiscVariable(span)) }).collect()) } } @@ -1472,12 +1452,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { writeback_errors: Cell::new(false), err_count_on_creation: inh.tcx.sess.err_count(), ret_ty: rty, - ps: RefCell::new(UnsafetyState::function(hir::Unsafety::Normal, 0)), + ps: RefCell::new(UnsafetyState::function(hir::Unsafety::Normal, + ast::CRATE_NODE_ID)), inh: inh, } } - pub fn param_env(&self) -> &ty::ParameterEnvironment<'tcx> { + pub fn param_env(&self) -> &ty::ParameterEnvironment<'gcx> { &self.parameter_environment } @@ -1544,9 +1525,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { match self.locals.borrow().get(&nid) { Some(&t) => t, None => { - span_err!(self.tcx.sess, span, E0513, - "no type for local variable {}", - nid); + struct_span_err!(self.tcx.sess, span, E0513, + "no type for local variable {}", + self.tcx.map.node_to_string(nid)) + .span_label(span, &"no type for variable") + .emit(); self.tcx.types.err } } @@ -1557,14 +1540,6 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { debug!("write_ty({}, {:?}) in fcx {}", node_id, ty, self.tag()); self.tables.borrow_mut().node_types.insert(node_id, ty); - - // Add adjustments to !-expressions - if ty.is_never() { - if let Some(hir::map::NodeExpr(_)) = self.tcx.map.find(node_id) { - let adj = adjustment::AdjustNeverToAny(self.next_diverging_ty_var()); - self.write_adjustment(node_id, adj); - } - } } pub fn write_substs(&self, node_id: ast::NodeId, substs: ty::ItemSubsts<'tcx>) { @@ -1630,8 +1605,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { bounds: &ty::GenericPredicates<'tcx>) -> ty::InstantiatedPredicates<'tcx> { + let result = bounds.instantiate(self.tcx, substs); + let result = self.normalize_associated_types_in(span, &result.predicates); + debug!("instantiate_bounds(bounds={:?}, substs={:?}) = {:?}", + bounds, + substs, + result); ty::InstantiatedPredicates { - predicates: self.instantiate_type_scheme(span, substs, &bounds.predicates) + predicates: result } } @@ -1707,27 +1688,24 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { node_id: ast::NodeId) -> Ty<'tcx> { debug!("instantiate_type_path(did={:?}, path={:?})", did, path); - let mut type_scheme = self.tcx.lookup_item_type(did); - if type_scheme.ty.is_fn() { + let mut ty = self.tcx.lookup_item_type(did).ty; + if ty.is_fn() { // Tuple variants have fn type even in type namespace, extract true variant type from it - let fn_ret = self.tcx.no_late_bound_regions(&type_scheme.ty.fn_ret()).unwrap(); - type_scheme = ty::TypeScheme { ty: fn_ret, generics: type_scheme.generics } + ty = self.tcx.no_late_bound_regions(&ty.fn_ret()).unwrap(); } let type_predicates = self.tcx.lookup_predicates(did); let substs = AstConv::ast_path_substs_for_ty(self, self, path.span, PathParamMode::Optional, - &type_scheme.generics, + did, 
path.segments.last().unwrap()); - let substs = self.tcx.mk_substs(substs); - debug!("instantiate_type_path: ty={:?} substs={:?}", &type_scheme.ty, substs); + debug!("instantiate_type_path: ty={:?} substs={:?}", ty, substs); let bounds = self.instantiate_bounds(path.span, substs, &type_predicates); let cause = traits::ObligationCause::new(path.span, self.body_id, traits::ItemObligation(did)); self.add_obligations_for_parameters(cause, &bounds); - let ty_substituted = self.instantiate_type_scheme(path.span, substs, &type_scheme.ty); - self.write_ty(node_id, ty_substituted); + let ty_substituted = self.instantiate_type_scheme(path.span, substs, &ty); self.write_substs(node_id, ty::ItemSubsts { substs: substs }); @@ -1766,13 +1744,6 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.require_type_meets(ty, span, code, ty::BoundSized); } - pub fn require_expr_have_sized_type(&self, - expr: &hir::Expr, - code: traits::ObligationCauseCode<'tcx>) - { - self.require_type_is_sized(self.expr_ty(expr), expr.span, code); - } - pub fn register_builtin_bound(&self, ty: Ty<'tcx>, builtin_bound: ty::BuiltinBound, @@ -1798,26 +1769,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { t } - pub fn expr_ty(&self, ex: &hir::Expr) -> Ty<'tcx> { - if let Some(&adjustment::AdjustNeverToAny(ref t)) - = self.tables.borrow().adjustments.get(&ex.id) { - return t; - } - match self.tables.borrow().node_types.get(&ex.id) { - Some(&t) => t, - None => { - bug!("no type for expr in fcx {}", self.tag()); - } - } - } - /// Apply `adjustment` to the type of `expr` pub fn adjust_expr_ty(&self, expr: &hir::Expr, adjustment: Option<&adjustment::AutoAdjustment<'tcx>>) -> Ty<'tcx> { - let raw_ty = self.expr_ty(expr); + let raw_ty = self.node_ty(expr.id); let raw_ty = self.shallow_resolve(raw_ty); let resolve_ty = |ty: Ty<'tcx>| self.resolve_type_vars_if_possible(&ty); raw_ty.adjust(self.tcx, expr.span, expr.id, adjustment, |method_call| { @@ -1864,7 +1822,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// outlive the region `r`. pub fn register_region_obligation(&self, ty: Ty<'tcx>, - region: ty::Region, + region: &'tcx ty::Region, cause: traits::ObligationCause<'tcx>) { let mut fulfillment_cx = self.fulfillment_cx.borrow_mut(); @@ -1895,13 +1853,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // // FIXME(#27579) all uses of this should be migrated to register_wf_obligation eventually let cause = traits::ObligationCause::new(span, self.body_id, code); - self.register_region_obligation(ty, ty::ReEmpty, cause); + self.register_region_obligation(ty, self.tcx.mk_region(ty::ReEmpty), cause); } /// Registers obligations that all types appearing in `substs` are well-formed. pub fn add_wf_bounds(&self, substs: &Substs<'tcx>, expr: &hir::Expr) { - for &ty in &substs.types { + for ty in substs.types() { self.register_wf_obligation(ty, expr.span, traits::MiscObligation); } } @@ -2047,7 +2005,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { .filter_map(|t| self.default(t).map(|d| (t, d))) .collect(); - let mut unbound_tyvars = HashSet::new(); + let mut unbound_tyvars = FnvHashSet(); debug!("select_all_obligations_and_apply_defaults: defaults={:?}", default_map); @@ -2164,7 +2122,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { .unwrap_or(type_variable::Default { ty: self.next_ty_var(), origin_span: syntax_pos::DUMMY_SP, - def_id: self.tcx.map.local_def_id(0) // what do I put here? + // what do I put here? 
+ def_id: self.tcx.map.local_def_id(ast::CRATE_NODE_ID) }); // This is to ensure that we elimnate any non-determinism from the error @@ -2194,7 +2153,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // table then apply defaults until we find a conflict. That default must be the one // that caused conflict earlier. fn find_conflicting_default(&self, - unbound_vars: &HashSet>, + unbound_vars: &FnvHashSet>, default_map: &FnvHashMap<&Ty<'tcx>, type_variable::Default<'tcx>>, conflict: Ty<'tcx>) -> Option> { @@ -2265,10 +2224,6 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { Ok(()) => { } Err(errors) => { self.report_fulfillment_errors(&errors); } } - - if let Err(ref errors) = fulfillment_cx.select_rfc1592_obligations(self) { - self.report_fulfillment_errors_as_warnings(errors, self.body_id); - } } /// Select as many obligations as we can at present. @@ -2433,7 +2388,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.tcx.types.err } else { match method_fn_ty.sty { - ty::TyFnDef(_, _, ref fty) => { + ty::TyFnDef(.., ref fty) => { // HACK(eddyb) ignore self in the definition (see above). let expected_arg_tys = self.expected_types_for_fn_args(sp, expected, fty.sig.0.output, @@ -2489,16 +2444,18 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if arg_count == 1 {" was"} else {"s were"}), error_code); - err.span_label(sp, &format!("expected {}{} parameter{}", - if variadic {"at least "} else {""}, - expected_count, - if expected_count == 1 {""} else {"s"})); - let input_types = fn_inputs.iter().map(|i| format!("{:?}", i)).collect::>(); - if input_types.len() > 0 { - err.note(&format!("the following parameter type{} expected: {}", - if expected_count == 1 {" was"} else {"s were"}, - input_types.join(", "))); + if input_types.len() > 1 { + err.note("the following parameter types were expected:"); + err.note(&input_types.join(", ")); + } else if input_types.len() > 0 { + err.note(&format!("the following parameter type was expected: {}", + input_types[0])); + } else { + err.span_label(sp, &format!("expected {}{} parameter{}", + if variadic {"at least "} else {""}, + expected_count, + if expected_count == 1 {""} else {"s"})); } err.emit(); } @@ -2606,13 +2563,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { Expectation::rvalue_hint(self, ty) }); - self.check_expr_with_expectation(&arg, - expected.unwrap_or(ExpectHasType(formal_ty))); + let checked_ty = self.check_expr_with_expectation(&arg, + expected.unwrap_or(ExpectHasType(formal_ty))); // 2. Coerce to the most detailed type that could be coerced // to, which is `expected_ty` if `rvalue_hint` returns an // `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise. let coerce_ty = expected.and_then(|e| e.only_has_type(self)); - self.demand_coerce(&arg, coerce_ty.unwrap_or(formal_ty)); + self.demand_coerce(&arg, checked_ty, coerce_ty.unwrap_or(formal_ty)); // 3. Relate the expected type and the formal one, // if the expected type was used for the coercion. @@ -2643,12 +2600,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // arguments which we skipped above. if variadic { for arg in args.iter().skip(expected_arg_count) { - self.check_expr(&arg); + let arg_ty = self.check_expr(&arg); // There are a few types which get autopromoted when passed via varargs // in C but we just error out instead and require explicit casts. 
let arg_ty = self.structurally_resolved_type(arg.span, - self.expr_ty(&arg)); + arg_ty); match arg_ty.sty { ty::TyFloat(ast::FloatTy::F32) => { self.type_error_message(arg.span, |t| { @@ -2670,7 +2627,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { t) }, arg_ty); } - ty::TyFnDef(_, _, f) => { + ty::TyFnDef(.., f) => { let ptr_ty = self.tcx.mk_fn_ptr(f); let ptr_ty = self.resolve_type_vars_if_possible(&ptr_ty); self.type_error_message(arg.span, @@ -2689,12 +2646,6 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { (0..len).map(|_| self.tcx.types.err).collect() } - fn write_call(&self, - call_expr: &hir::Expr, - output: Ty<'tcx>) { - self.write_ty(call_expr.id, output); - } - // AST fragment checking fn check_lit(&self, lit: &ast::Lit, @@ -2744,41 +2695,43 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn check_expr_eq_type(&self, expr: &'gcx hir::Expr, expected: Ty<'tcx>) { - self.check_expr_with_hint(expr, expected); - self.demand_eqtype(expr.span, expected, self.expr_ty(expr)); + let ty = self.check_expr_with_hint(expr, expected); + self.demand_eqtype(expr.span, expected, ty); } pub fn check_expr_has_type(&self, expr: &'gcx hir::Expr, - expected: Ty<'tcx>) { - self.check_expr_with_hint(expr, expected); - self.demand_suptype(expr.span, expected, self.expr_ty(expr)); + expected: Ty<'tcx>) -> Ty<'tcx> { + let ty = self.check_expr_with_hint(expr, expected); + self.demand_suptype(expr.span, expected, ty); + ty } fn check_expr_coercable_to_type(&self, expr: &'gcx hir::Expr, - expected: Ty<'tcx>) { - self.check_expr_with_hint(expr, expected); - self.demand_coerce(expr, expected); + expected: Ty<'tcx>) -> Ty<'tcx> { + let ty = self.check_expr_with_hint(expr, expected); + self.demand_coerce(expr, ty, expected); + ty } fn check_expr_with_hint(&self, expr: &'gcx hir::Expr, - expected: Ty<'tcx>) { + expected: Ty<'tcx>) -> Ty<'tcx> { self.check_expr_with_expectation(expr, ExpectHasType(expected)) } fn check_expr_with_expectation(&self, expr: &'gcx hir::Expr, - expected: Expectation<'tcx>) { + expected: Expectation<'tcx>) -> Ty<'tcx> { self.check_expr_with_expectation_and_lvalue_pref(expr, expected, NoPreference) } - fn check_expr(&self, expr: &'gcx hir::Expr) { + fn check_expr(&self, expr: &'gcx hir::Expr) -> Ty<'tcx> { self.check_expr_with_expectation(expr, NoExpectation) } fn check_expr_with_lvalue_pref(&self, expr: &'gcx hir::Expr, - lvalue_pref: LvaluePreference) { + lvalue_pref: LvaluePreference) -> Ty<'tcx> { self.check_expr_with_expectation_and_lvalue_pref(expr, NoExpectation, lvalue_pref) } @@ -2790,22 +2743,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { span: Span, // (potential) receiver for this impl did: DefId) -> TypeAndSubsts<'tcx> { - let tcx = self.tcx; - - let ity = tcx.lookup_item_type(did); - let (tps, rps, raw_ty) = - (ity.generics.types.get_slice(subst::TypeSpace), - ity.generics.regions.get_slice(subst::TypeSpace), - ity.ty); + let ity = self.tcx.lookup_item_type(did); + debug!("impl_self_ty: ity={:?}", ity); - debug!("impl_self_ty: tps={:?} rps={:?} raw_ty={:?}", tps, rps, raw_ty); - - let rps = self.region_vars_for_defs(span, rps); - let mut substs = subst::Substs::new( - VecPerParamSpace::empty(), - VecPerParamSpace::new(rps, Vec::new(), Vec::new())); - self.type_vars_for_defs(span, ParamSpace::TypeSpace, &mut substs, tps); - let substd_ty = self.instantiate_type_scheme(span, &substs, &raw_ty); + let substs = self.fresh_substs_for_item(span, did); + let substd_ty = self.instantiate_type_scheme(span, &substs, &ity.ty); TypeAndSubsts { substs: substs, ty: substd_ty } } 
@@ -2854,12 +2796,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { args: &'gcx [P], tps: &[P], expected: Expectation<'tcx>, - lvalue_pref: LvaluePreference) { + lvalue_pref: LvaluePreference) -> Ty<'tcx> { let rcvr = &args[0]; - self.check_expr_with_lvalue_pref(&rcvr, lvalue_pref); + let rcvr_t = self.check_expr_with_lvalue_pref(&rcvr, lvalue_pref); // no need to check for bot/err -- callee does that - let expr_t = self.structurally_resolved_type(expr.span, self.expr_ty(&rcvr)); + let expr_t = self.structurally_resolved_type(expr.span, rcvr_t); let tps = tps.iter().map(|ast_ty| self.to_ty(&ast_ty)).collect::>(); let fn_ty = match self.lookup_method(method_name.span, @@ -2890,7 +2832,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { DontTupleArguments, expected); - self.write_call(expr, ret_ty); + ret_ty } // A generic function for checking the then and else in an if @@ -2899,27 +2841,24 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { cond_expr: &'gcx hir::Expr, then_blk: &'gcx hir::Block, opt_else_expr: Option<&'gcx hir::Expr>, - id: ast::NodeId, sp: Span, - expected: Expectation<'tcx>) { - self.check_expr_has_type(cond_expr, self.tcx.types.bool); + expected: Expectation<'tcx>) -> Ty<'tcx> { + let cond_ty = self.check_expr_has_type(cond_expr, self.tcx.types.bool); let expected = expected.adjust_for_branches(self); - self.check_block_with_expected(then_blk, expected); - let then_ty = self.node_ty(then_blk.id); + let then_ty = self.check_block_with_expected(then_blk, expected); let unit = self.tcx.mk_nil(); let (origin, expected, found, result) = if let Some(else_expr) = opt_else_expr { - self.check_expr_with_expectation(else_expr, expected); - let else_ty = self.expr_ty(else_expr); + let else_ty = self.check_expr_with_expectation(else_expr, expected); let origin = TypeOrigin::IfExpression(sp); // Only try to coerce-unify if we have a then expression // to assign coercions to, otherwise it's () or diverging. let result = if let Some(ref then) = then_blk.expr { let res = self.try_find_coercion_lub(origin, || Some(&**then), - then_ty, else_expr); + then_ty, else_expr, else_ty); // In case we did perform an adjustment, we have to update // the type of the block, because old trans still uses it. 
@@ -2952,9 +2891,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { })) }; - let if_ty = match result { + match result { Ok(ty) => { - if self.expr_ty(cond_expr).references_error() { + if cond_ty.references_error() { self.tcx.types.err } else { ty @@ -2964,9 +2903,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.report_mismatched_types(origin, expected, found, e); self.tcx.types.err } - }; - - self.write_ty(id, if_ty); + } } // Check field access expressions @@ -2974,32 +2911,33 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { expr: &'gcx hir::Expr, lvalue_pref: LvaluePreference, base: &'gcx hir::Expr, - field: &Spanned) { - self.check_expr_with_lvalue_pref(base, lvalue_pref); + field: &Spanned) -> Ty<'tcx> { + let expr_t = self.check_expr_with_lvalue_pref(base, lvalue_pref); let expr_t = self.structurally_resolved_type(expr.span, - self.expr_ty(base)); + expr_t); let mut private_candidate = None; let mut autoderef = self.autoderef(expr.span, expr_t); while let Some((base_t, autoderefs)) = autoderef.next() { - if let ty::TyStruct(base_def, substs) = base_t.sty { - debug!("struct named {:?}", base_t); - if let Some(field) = base_def.struct_variant().find_field_named(field.node) { - let field_ty = self.field_ty(expr.span, field, substs); - if field.vis.is_accessible_from(self.body_id, &self.tcx().map) { - autoderef.finalize(lvalue_pref, Some(base)); - self.write_ty(expr.id, field_ty); - self.write_autoderef_adjustment(base.id, autoderefs); - return; + match base_t.sty { + ty::TyAdt(base_def, substs) if !base_def.is_enum() => { + debug!("struct named {:?}", base_t); + if let Some(field) = base_def.struct_variant().find_field_named(field.node) { + let field_ty = self.field_ty(expr.span, field, substs); + if field.vis.is_accessible_from(self.body_id, &self.tcx().map) { + autoderef.finalize(lvalue_pref, Some(base)); + self.write_autoderef_adjustment(base.id, autoderefs); + return field_ty; + } + private_candidate = Some((base_def.did, field_ty)); } - private_candidate = Some((base_def.did, field_ty)); } + _ => {} } } autoderef.unambiguous_final_ty(); if let Some((did, field_ty)) = private_candidate { let struct_path = self.tcx().item_path_str(did); - self.write_ty(expr.id, field_ty); let msg = format!("field `{}` of struct `{}` is private", field.node, struct_path); let mut err = self.tcx().sess.struct_span_err(expr.span, &msg); // Also check if an accessible method exists, which is often what is meant. @@ -3008,8 +2946,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { field.node)); } err.emit(); + field_ty } else if field.node == keywords::Invalid.name() { - self.write_error(expr.id); + self.tcx().types.err } else if self.method_exists(field.span, field.node, expr_t, expr.id, true) { self.type_error_struct(field.span, |actual| { format!("attempted to take value of method `{}` on type \ @@ -3018,26 +2957,37 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { .help("maybe a `()` to call it is missing? 
\ If not, try an anonymous function") .emit(); - self.write_error(expr.id); + self.tcx().types.err } else { let mut err = self.type_error_struct(expr.span, |actual| { format!("attempted access of field `{}` on type `{}`, \ but no field with that name was found", field.node, actual) }, expr_t); - if let ty::TyStruct(def, _) = expr_t.sty { - Self::suggest_field_names(&mut err, def.struct_variant(), field, vec![]); + match expr_t.sty { + ty::TyAdt(def, _) if !def.is_enum() => { + if let Some(suggested_field_name) = + Self::suggest_field_name(def.struct_variant(), field, vec![]) { + err.span_help(field.span, + &format!("did you mean `{}`?", suggested_field_name)); + }; + } + ty::TyRawPtr(..) => { + err.note(&format!("`{0}` is a native pointer; perhaps you need to deref with \ + `(*{0}).{1}`", pprust::expr_to_string(base), field.node)); + } + _ => {} } err.emit(); - self.write_error(expr.id); + self.tcx().types.err } } - // displays hints about the closest matches in field names - fn suggest_field_names(err: &mut DiagnosticBuilder, - variant: ty::VariantDef<'tcx>, - field: &Spanned, - skip : Vec) { + // Return an hint about the closest match in field names + fn suggest_field_name(variant: ty::VariantDef<'tcx>, + field: &Spanned, + skip : Vec) + -> Option { let name = field.node.as_str(); let names = variant.fields.iter().filter_map(|field| { // ignore already set fields and private fields from non-local crates @@ -3050,10 +3000,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }); // only find fits with at least one matching letter - if let Some(name) = find_best_match_for_name(names, &name, Some(name.len())) { - err.span_help(field.span, - &format!("did you mean `{}`?", name)); - } + find_best_match_for_name(names, &name, Some(name.len())) } // Check tuple index expressions @@ -3061,16 +3008,16 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { expr: &'gcx hir::Expr, lvalue_pref: LvaluePreference, base: &'gcx hir::Expr, - idx: codemap::Spanned) { - self.check_expr_with_lvalue_pref(base, lvalue_pref); + idx: codemap::Spanned) -> Ty<'tcx> { + let expr_t = self.check_expr_with_lvalue_pref(base, lvalue_pref); let expr_t = self.structurally_resolved_type(expr.span, - self.expr_ty(base)); + expr_t); let mut private_candidate = None; let mut tuple_like = false; let mut autoderef = self.autoderef(expr.span, expr_t); while let Some((base_t, autoderefs)) = autoderef.next() { let field = match base_t.sty { - ty::TyStruct(base_def, substs) => { + ty::TyAdt(base_def, substs) if base_def.is_struct() => { tuple_like = base_def.struct_variant().kind == ty::VariantKind::Tuple; if !tuple_like { continue } @@ -3094,9 +3041,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if let Some(field_ty) = field { autoderef.finalize(lvalue_pref, Some(base)); - self.write_ty(expr.id, field_ty); self.write_autoderef_adjustment(base.id, autoderefs); - return; + return field_ty; } } autoderef.unambiguous_final_ty(); @@ -3105,8 +3051,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let struct_path = self.tcx().item_path_str(did); let msg = format!("field `{}` of struct `{}` is private", idx.node, struct_path); self.tcx().sess.span_err(expr.span, &msg); - self.write_ty(expr.id, field_ty); - return; + return field_ty; } self.type_error_message( @@ -3126,29 +3071,48 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }, expr_t); - self.write_error(expr.id); + self.tcx().types.err } fn report_unknown_field(&self, ty: Ty<'tcx>, variant: ty::VariantDef<'tcx>, field: &hir::Field, - skip_fields: &[hir::Field]) { + skip_fields: 
&[hir::Field], + kind_name: &str) { let mut err = self.type_error_struct_with_diag( field.name.span, - |actual| if let ty::TyEnum(..) = ty.sty { - struct_span_err!(self.tcx.sess, field.name.span, E0559, - "struct variant `{}::{}` has no field named `{}`", - actual, variant.name.as_str(), field.name.node) - } else { - struct_span_err!(self.tcx.sess, field.name.span, E0560, - "structure `{}` has no field named `{}`", - actual, field.name.node) + |actual| match ty.sty { + ty::TyAdt(adt, ..) if adt.is_enum() => { + struct_span_err!(self.tcx.sess, field.name.span, E0559, + "{} `{}::{}` has no field named `{}`", + kind_name, actual, variant.name.as_str(), field.name.node) + } + _ => { + struct_span_err!(self.tcx.sess, field.name.span, E0560, + "{} `{}` has no field named `{}`", + kind_name, actual, field.name.node) + } }, ty); // prevent all specified fields from being suggested let skip_fields = skip_fields.iter().map(|ref x| x.name.node.as_str()); - Self::suggest_field_names(&mut err, variant, &field.name, skip_fields.collect()); + if let Some(field_name) = Self::suggest_field_name(variant, + &field.name, + skip_fields.collect()) { + err.span_label(field.name.span, + &format!("field does not exist - did you mean `{}`?", field_name)); + } else { + match ty.sty { + ty::TyAdt(adt, ..) if adt.is_enum() => { + err.span_label(field.name.span, &format!("`{}::{}` does not have this field", + ty, variant.name.as_str())); + } + _ => { + err.span_label(field.name.span, &format!("`{}` does not have this field", ty)); + } + } + }; err.emit(); } @@ -3159,8 +3123,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ast_fields: &'gcx [hir::Field], check_completeness: bool) { let tcx = self.tcx; - let substs = match adt_ty.sty { - ty::TyStruct(_, substs) | ty::TyEnum(_, substs) => substs, + let (substs, kind_name) = match adt_ty.sty { + ty::TyAdt(adt, substs) => (substs, adt.variant_descr()), _ => span_bug!(span, "non-ADT passed to check_expr_struct_fields") }; @@ -3199,7 +3163,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { err.emit(); } else { - self.report_unknown_field(adt_ty, variant, field, ast_fields); + self.report_unknown_field(adt_ty, variant, field, ast_fields, kind_name); } } @@ -3208,35 +3172,55 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.check_expr_coercable_to_type(&field.expr, expected_field_type); } - // Make sure the programmer specified all the fields. - if check_completeness && - !error_happened && - !remaining_fields.is_empty() - { - span_err!(tcx.sess, span, E0063, - "missing field{} {} in initializer of `{}`", - if remaining_fields.len() == 1 {""} else {"s"}, - remaining_fields.keys() - .map(|n| format!("`{}`", n)) - .collect::>() - .join(", "), - adt_ty); - } + // Make sure the programmer specified correct number of fields. 
+ if kind_name == "union" { + if ast_fields.len() != 1 { + tcx.sess.span_err(span, "union expressions should have exactly one field"); + } + } else if check_completeness && !error_happened && !remaining_fields.is_empty() { + let len = remaining_fields.len(); + + let mut displayable_field_names = remaining_fields + .keys() + .map(|x| x.as_str()) + .collect::>(); + + displayable_field_names.sort(); + + let truncated_fields_error = if len <= 3 { + "".to_string() + } else { + format!(" and {} other field{}", (len - 3), if len - 3 == 1 {""} else {"s"}) + }; + let remaining_fields_names = displayable_field_names.iter().take(3) + .map(|n| format!("`{}`", n)) + .collect::>() + .join(", "); + + struct_span_err!(tcx.sess, span, E0063, + "missing field{} {}{} in initializer of `{}`", + if remaining_fields.len() == 1 {""} else {"s"}, + remaining_fields_names, + truncated_fields_error, + adt_ty) + .span_label(span, &format!("missing {}{}", + remaining_fields_names, + truncated_fields_error)) + .emit(); + } } fn check_struct_fields_on_error(&self, - id: ast::NodeId, fields: &'gcx [hir::Field], base_expr: &'gcx Option>) { - // Make sure to still write the types - // otherwise we might ICE - self.write_error(id); for field in fields { self.check_expr(&field.expr); } match *base_expr { - Some(ref base) => self.check_expr(&base), + Some(ref base) => { + self.check_expr(&base); + }, None => {} } } @@ -3252,28 +3236,32 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.set_tainted_by_errors(); return None; } - Def::Variant(..) | Def::Struct(..) => { - Some(self.tcx.expect_variant_def(def)) + Def::Variant(did) => { + let type_did = self.tcx.parent_def_id(did).unwrap(); + Some((type_did, self.tcx.expect_variant_def(def))) } - Def::TyAlias(did) | Def::AssociatedTy(_, did) => { - if let Some(&ty::TyStruct(adt, _)) = self.tcx.opt_lookup_item_type(did) - .map(|scheme| &scheme.ty.sty) { - Some(adt.struct_variant()) - } else { - None + Def::Struct(type_did) | Def::Union(type_did) => { + Some((type_did, self.tcx.expect_variant_def(def))) + } + Def::TyAlias(did) => { + match self.tcx.opt_lookup_item_type(did).map(|scheme| &scheme.ty.sty) { + Some(&ty::TyAdt(adt, _)) if !adt.is_enum() => { + Some((did, adt.struct_variant())) + } + _ => None, } } _ => None }; - if let Some(variant) = variant { + if let Some((def_id, variant)) = variant { if variant.kind == ty::VariantKind::Tuple && !self.tcx.sess.features.borrow().relaxed_adts { - emit_feature_err(&self.tcx.sess.parse_sess.span_diagnostic, + emit_feature_err(&self.tcx.sess.parse_sess, "relaxed_adts", span, GateIssue::Language, "tuple structs and variants in struct patterns are unstable"); } - let ty = self.instantiate_type_path(def.def_id(), path, node_id); + let ty = self.instantiate_type_path(def_id, path, node_id); Some((variant, ty)) } else { struct_span_err!(self.tcx.sess, path.span, E0071, @@ -3289,23 +3277,23 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { expr: &hir::Expr, path: &hir::Path, fields: &'gcx [hir::Field], - base_expr: &'gcx Option>) + base_expr: &'gcx Option>) -> Ty<'tcx> { // Find the relevant variant - let (variant, expr_ty) = if let Some(variant_ty) = self.check_struct_path(path, expr.id, - expr.span) { + let (variant, struct_ty) = if let Some(variant_ty) = self.check_struct_path(path, expr.id, + expr.span) { variant_ty } else { - self.check_struct_fields_on_error(expr.id, fields, base_expr); - return; + self.check_struct_fields_on_error(fields, base_expr); + return self.tcx().types.err; }; - self.check_expr_struct_fields(expr_ty, path.span, 
variant, fields, + self.check_expr_struct_fields(struct_ty, path.span, variant, fields, base_expr.is_none()); if let &Some(ref base_expr) = base_expr { - self.check_expr_has_type(base_expr, expr_ty); - match expr_ty.sty { - ty::TyStruct(adt, substs) => { + self.check_expr_has_type(base_expr, struct_ty); + match struct_ty.sty { + ty::TyAdt(adt, substs) if adt.is_struct() => { self.tables.borrow_mut().fru_field_types.insert( expr.id, adt.struct_variant().fields.iter().map(|f| { @@ -3321,6 +3309,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } } + self.require_type_is_sized(struct_ty, expr.span, traits::StructInitializerSized); + struct_ty } @@ -3337,10 +3327,35 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn check_expr_with_expectation_and_lvalue_pref(&self, expr: &'gcx hir::Expr, expected: Expectation<'tcx>, - lvalue_pref: LvaluePreference) { + lvalue_pref: LvaluePreference) -> Ty<'tcx> { debug!(">> typechecking: expr={:?} expected={:?}", expr, expected); + let ty = self.check_expr_kind(expr, expected, lvalue_pref); + self.write_ty(expr.id, ty); + + debug!("type of expr({}) {} is...", expr.id, + pprust::expr_to_string(expr)); + debug!("... {:?}, expected is {:?}", + ty, + expected); + + // Add adjustments to !-expressions + if ty.is_never() { + if let Some(hir::map::NodeExpr(_)) = self.tcx.map.find(expr.id) { + let adj_ty = self.next_diverging_ty_var(); + let adj = adjustment::AdjustNeverToAny(adj_ty); + self.write_adjustment(expr.id, adj); + return adj_ty; + } + } + ty + } + + fn check_expr_kind(&self, + expr: &'gcx hir::Expr, + expected: Expectation<'tcx>, + lvalue_pref: LvaluePreference) -> Ty<'tcx> { let tcx = self.tcx; let id = expr.id; match expr.node { @@ -3351,20 +3366,18 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { _ => NoExpectation } }); - self.check_expr_with_expectation(subexpr, expected_inner); - let referent_ty = self.expr_ty(&subexpr); - self.write_ty(id, tcx.mk_box(referent_ty)); + let referent_ty = self.check_expr_with_expectation(subexpr, expected_inner); + tcx.mk_box(referent_ty) } hir::ExprLit(ref lit) => { - let typ = self.check_lit(&lit, expected); - self.write_ty(id, typ); + self.check_lit(&lit, expected) } hir::ExprBinary(op, ref lhs, ref rhs) => { - self.check_binop(expr, op, lhs, rhs); + self.check_binop(expr, op, lhs, rhs) } hir::ExprAssignOp(op, ref lhs, ref rhs) => { - self.check_binop_assign(expr, op, lhs, rhs); + self.check_binop_assign(expr, op, lhs, rhs) } hir::ExprUnary(unop, ref oprnd) => { let expected_inner = match unop { @@ -3379,10 +3392,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { hir::UnDeref => lvalue_pref, _ => NoPreference }; - self.check_expr_with_expectation_and_lvalue_pref(&oprnd, - expected_inner, - lvalue_pref); - let mut oprnd_t = self.expr_ty(&oprnd); + let mut oprnd_t = self.check_expr_with_expectation_and_lvalue_pref(&oprnd, + expected_inner, + lvalue_pref); if !oprnd_t.references_error() { match unop { @@ -3424,7 +3436,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } } - self.write_ty(id, oprnd_t); + oprnd_t } hir::ExprAddrOf(mutbl, ref oprnd) => { let hint = expected.only_has_type(self).map_or(NoExpectation, |ty| { @@ -3443,10 +3455,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } }); let lvalue_pref = LvaluePreference::from_mutbl(mutbl); - self.check_expr_with_expectation_and_lvalue_pref(&oprnd, hint, lvalue_pref); + let ty = self.check_expr_with_expectation_and_lvalue_pref(&oprnd, hint, lvalue_pref); - let tm = ty::TypeAndMut { ty: self.expr_ty(&oprnd), mutbl: mutbl }; - let oprnd_t = if 
tm.ty.references_error() { + let tm = ty::TypeAndMut { ty: ty, mutbl: mutbl }; + if tm.ty.references_error() { tcx.types.err } else { // Note: at this point, we cannot say what the best lifetime @@ -3463,29 +3475,27 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // value whose address was taken can actually be made to live // as long as it needs to live. let region = self.next_region_var(infer::AddrOfRegion(expr.span)); - tcx.mk_ref(tcx.mk_region(region), tm) - }; - self.write_ty(id, oprnd_t); + tcx.mk_ref(region, tm) + } } hir::ExprPath(ref opt_qself, ref path) => { let opt_self_ty = opt_qself.as_ref().map(|qself| self.to_ty(&qself.ty)); let (def, opt_ty, segments) = self.resolve_ty_and_def_ufcs(opt_self_ty, path, expr.id, expr.span); - if def != Def::Err { - let (scheme, predicates) = self.type_scheme_and_predicates_for_def(expr.span, - def); - self.instantiate_value_path(segments, scheme, &predicates, - opt_ty, def, expr.span, id); + let ty = if def != Def::Err { + self.instantiate_value_path(segments, opt_ty, def, expr.span, id) } else { self.set_tainted_by_errors(); - self.write_error(id); - } + tcx.types.err + }; // We always require that the type provided as the value for // a type parameter outlives the moment of instantiation. self.opt_node_ty_substs(expr.id, |item_substs| { self.add_wf_bounds(&item_substs.substs, expr); }); + + ty } hir::ExprInlineAsm(_, ref outputs, ref inputs) => { for output in outputs { @@ -3494,10 +3504,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { for input in inputs { self.check_expr(input); } - self.write_nil(id); + tcx.mk_nil() } - hir::ExprBreak(_) => { self.write_never(id); } - hir::ExprAgain(_) => { self.write_never(id); } + hir::ExprBreak(_) => { tcx.types.never } + hir::ExprAgain(_) => { tcx.types.never } hir::ExprRet(ref expr_opt) => { if let Some(ref e) = *expr_opt { self.check_expr_coercable_to_type(&e, self.ret_ty); @@ -3515,10 +3525,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { .emit(); } } - self.write_never(id); + tcx.types.never } hir::ExprAssign(ref lhs, ref rhs) => { - self.check_expr_with_lvalue_pref(&lhs, PreferMutLvalue); + let lhs_ty = self.check_expr_with_lvalue_pref(&lhs, PreferMutLvalue); let tcx = self.tcx; if !tcx.expr_is_lval(&lhs) { @@ -3531,66 +3541,53 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { .emit(); } - let lhs_ty = self.expr_ty(&lhs); - self.check_expr_coercable_to_type(&rhs, lhs_ty); - let rhs_ty = self.expr_ty(&rhs); + let rhs_ty = self.check_expr_coercable_to_type(&rhs, lhs_ty); - self.require_expr_have_sized_type(&lhs, traits::AssignmentLhsSized); + self.require_type_is_sized(lhs_ty, lhs.span, traits::AssignmentLhsSized); if lhs_ty.references_error() || rhs_ty.references_error() { - self.write_error(id); + tcx.types.err } else { - self.write_nil(id); + tcx.mk_nil() } } hir::ExprIf(ref cond, ref then_blk, ref opt_else_expr) => { self.check_then_else(&cond, &then_blk, opt_else_expr.as_ref().map(|e| &**e), - id, expr.span, expected); + expr.span, expected) } hir::ExprWhile(ref cond, ref body, _) => { - self.check_expr_has_type(&cond, tcx.types.bool); + let cond_ty = self.check_expr_has_type(&cond, tcx.types.bool); self.check_block_no_value(&body); - let cond_ty = self.expr_ty(&cond); let body_ty = self.node_ty(body.id); if cond_ty.references_error() || body_ty.references_error() { - self.write_error(id); + tcx.types.err } else { - self.write_nil(id); + tcx.mk_nil() } } hir::ExprLoop(ref body, _) => { self.check_block_no_value(&body); if !may_break(tcx, expr.id, &body) { - self.write_never(id); + 
tcx.types.never } else { - self.write_nil(id); + tcx.mk_nil() } } hir::ExprMatch(ref discrim, ref arms, match_src) => { - self.check_match(expr, &discrim, arms, expected, match_src); + self.check_match(expr, &discrim, arms, expected, match_src) } hir::ExprClosure(capture, ref decl, ref body, _) => { - self.check_expr_closure(expr, capture, &decl, &body, expected); + self.check_expr_closure(expr, capture, &decl, &body, expected) } hir::ExprBlock(ref b) => { - self.check_block_with_expected(&b, expected); - self.write_ty(id, self.node_ty(b.id)); + self.check_block_with_expected(&b, expected) } hir::ExprCall(ref callee, ref args) => { - self.check_call(expr, &callee, &args[..], expected); - - // we must check that return type of called functions is WF: - let ret_ty = self.expr_ty(expr); - self.register_wf_obligation(ret_ty, expr.span, traits::MiscObligation); + self.check_call(expr, &callee, &args[..], expected) } hir::ExprMethodCall(name, ref tps, ref args) => { - self.check_method_call(expr, name, &args[..], &tps[..], expected, lvalue_pref); - let arg_tys = args.iter().map(|a| self.expr_ty(&a)); - let args_err = arg_tys.fold(false, |rest_err, a| rest_err || a.references_error()); - if args_err { - self.write_error(id); - } + self.check_method_call(expr, name, &args[..], &tps[..], expected, lvalue_pref) } hir::ExprCast(ref e, ref t) => { if let hir::TyFixedLengthVec(_, ref count_expr) = t.node { @@ -3601,26 +3598,22 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // if appropriate. let t_cast = self.to_ty(t); let t_cast = self.resolve_type_vars_if_possible(&t_cast); - self.check_expr_with_expectation(e, ExpectCastableToType(t_cast)); - let t_expr = self.expr_ty(e); + let t_expr = self.check_expr_with_expectation(e, ExpectCastableToType(t_cast)); let t_cast = self.resolve_type_vars_if_possible(&t_cast); // Eagerly check for some obvious errors. if t_expr.references_error() || t_cast.references_error() { - self.write_error(id); + tcx.types.err } else { - // Write a type for the whole expression, assuming everything is going - // to work out Ok. - self.write_ty(id, t_cast); - // Defer other checks until we're done type checking. let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut(); match cast::CastCheck::new(self, e, t_expr, t_cast, t.span, expr.span) { Ok(cast_check) => { deferred_cast_checks.push(cast_check); + t_cast } Err(ErrorReported) => { - self.write_error(id); + tcx.types.err } } } @@ -3628,7 +3621,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { hir::ExprType(ref e, ref t) => { let typ = self.to_ty(&t); self.check_expr_eq_type(&e, typ); - self.write_ty(id, typ); + typ } hir::ExprVec(ref args) => { let uty = expected.to_option(self).and_then(|uty| { @@ -3642,16 +3635,15 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let coerce_to = uty.unwrap_or(unified); for (i, e) in args.iter().enumerate() { - self.check_expr_with_hint(e, coerce_to); - let e_ty = self.expr_ty(e); + let e_ty = self.check_expr_with_hint(e, coerce_to); let origin = TypeOrigin::Misc(e.span); // Special-case the first element, as it has no "previous expressions". 
let result = if i == 0 { - self.try_coerce(e, coerce_to) + self.try_coerce(e, e_ty, coerce_to) } else { let prev_elems = || args[..i].iter().map(|e| &**e); - self.try_find_coercion_lub(origin, prev_elems, unified, e) + self.try_find_coercion_lub(origin, prev_elems, unified, e, e_ty) }; match result { @@ -3661,7 +3653,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } } - self.write_ty(id, tcx.mk_array(unified, args.len())); + tcx.mk_array(unified, args.len()) } hir::ExprRepeat(ref element, ref count_expr) => { self.check_expr_has_type(&count_expr, tcx.types.usize); @@ -3685,8 +3677,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } None => { let t: Ty = self.next_ty_var(); - self.check_expr_has_type(&element, t); - (self.expr_ty(&element), t) + let element_ty = self.check_expr_has_type(&element, t); + (element_ty, t) } }; @@ -3697,10 +3689,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } if element_ty.references_error() { - self.write_error(id); + tcx.types.err } else { - let t = tcx.mk_array(t, count); - self.write_ty(id, t); + tcx.mk_array(t, count) } } hir::ExprTup(ref elts) => { @@ -3720,49 +3711,41 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ety } _ => { - self.check_expr_with_expectation(&e, NoExpectation); - self.expr_ty(&e) + self.check_expr_with_expectation(&e, NoExpectation) } }; err_field = err_field || t.references_error(); t }).collect(); if err_field { - self.write_error(id); + tcx.types.err } else { - let typ = tcx.mk_tup(elt_ts); - self.write_ty(id, typ); + tcx.mk_tup(elt_ts) } } hir::ExprStruct(ref path, ref fields, ref base_expr) => { - self.check_expr_struct(expr, path, fields, base_expr); - - self.require_expr_have_sized_type(expr, traits::StructInitializerSized); + self.check_expr_struct(expr, path, fields, base_expr) } hir::ExprField(ref base, ref field) => { - self.check_field(expr, lvalue_pref, &base, field); + self.check_field(expr, lvalue_pref, &base, field) } hir::ExprTupField(ref base, idx) => { - self.check_tup_field(expr, lvalue_pref, &base, idx); + self.check_tup_field(expr, lvalue_pref, &base, idx) } hir::ExprIndex(ref base, ref idx) => { - self.check_expr_with_lvalue_pref(&base, lvalue_pref); - self.check_expr(&idx); - - let base_t = self.expr_ty(&base); - let idx_t = self.expr_ty(&idx); + let base_t = self.check_expr_with_lvalue_pref(&base, lvalue_pref); + let idx_t = self.check_expr(&idx); if base_t.references_error() { - self.write_ty(id, base_t); + base_t } else if idx_t.references_error() { - self.write_ty(id, idx_t); + idx_t } else { let base_t = self.structurally_resolved_type(expr.span, base_t); match self.lookup_indexing(expr, base, base_t, idx_t, lvalue_pref) { Some((index_ty, element_ty)) => { - let idx_expr_ty = self.expr_ty(idx); - self.demand_eqtype(expr.span, index_ty, idx_expr_ty); - self.write_ty(id, element_ty); + self.demand_eqtype(expr.span, index_ty, idx_t); + element_ty } None => { self.check_expr_has_type(&idx, self.tcx.types.err); @@ -3798,18 +3781,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } err.emit(); - self.write_ty(id, self.tcx().types.err); + self.tcx().types.err } } } } } - - debug!("type of expr({}) {} is...", expr.id, - pprust::expr_to_string(expr)); - debug!("... {:?}, expected is {:?}", - self.expr_ty(expr), - expected); } // Finish resolving a path in a struct expression or pattern `S::A { .. }` if necessary. 
@@ -3889,7 +3866,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn check_decl_initializer(&self, local: &'gcx hir::Local, - init: &'gcx hir::Expr) + init: &'gcx hir::Expr) -> Ty<'tcx> { let ref_bindings = self.tcx.pat_contains_ref_binding(&local.pat); @@ -3903,12 +3880,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // referent for the reference that results is *equal to* the // type of the lvalue it is referencing, and not some // supertype thereof. - self.check_expr_with_lvalue_pref(init, LvaluePreference::from_mutbl(m)); - let init_ty = self.expr_ty(init); + let init_ty = self.check_expr_with_lvalue_pref(init, LvaluePreference::from_mutbl(m)); self.demand_eqtype(init.span, init_ty, local_ty); + init_ty } else { self.check_expr_coercable_to_type(init, local_ty) - }; + } } pub fn check_decl_local(&self, local: &'gcx hir::Local) { @@ -3916,8 +3893,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.write_ty(local.id, t); if let Some(ref init) = local.init { - self.check_decl_initializer(local, &init); - let init_ty = self.expr_ty(&init); + let init_ty = self.check_decl_initializer(local, &init); if init_ty.references_error() { self.write_ty(local.id, init_ty); } @@ -3930,7 +3906,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - pub fn check_stmt(&self, stmt: &'gcx hir::Stmt) { + pub fn check_stmt(&self, stmt: &'gcx hir::Stmt) { let node_id; let mut saw_bot = false; let mut saw_err = false; @@ -3950,17 +3926,15 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { hir::StmtExpr(ref expr, id) => { node_id = id; // Check with expected type of () - self.check_expr_has_type(&expr, self.tcx.mk_nil()); - let expr_ty = self.expr_ty(&expr); - saw_bot = saw_bot || self.type_var_diverges(expr_ty); - saw_err = saw_err || expr_ty.references_error(); + let ty = self.check_expr_has_type(&expr, self.tcx.mk_nil()); + saw_bot = saw_bot || self.type_var_diverges(ty); + saw_err = saw_err || ty.references_error(); } hir::StmtSemi(ref expr, id) => { node_id = id; - self.check_expr(&expr); - let expr_ty = self.expr_ty(&expr); - saw_bot |= self.type_var_diverges(expr_ty); - saw_err |= expr_ty.references_error(); + let ty = self.check_expr(&expr); + saw_bot |= self.type_var_diverges(ty); + saw_err |= ty.references_error(); } } if saw_bot { @@ -3970,13 +3944,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.write_error(node_id); } else { - self.write_nil(node_id) + self.write_nil(node_id); } } pub fn check_block_no_value(&self, blk: &'gcx hir::Block) { - self.check_block_with_expected(blk, ExpectHasType(self.tcx.mk_nil())); - let blkty = self.node_ty(blk.id); + let blkty = self.check_block_with_expected(blk, ExpectHasType(self.tcx.mk_nil())); if blkty.references_error() { self.write_error(blk.id); } else { @@ -3987,7 +3960,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn check_block_with_expected(&self, blk: &'gcx hir::Block, - expected: Expectation<'tcx>) { + expected: Expectation<'tcx>) -> Ty<'tcx> { let prev = { let mut fcx_ps = self.ps.borrow_mut(); let unsafety_state = fcx_ps.recurse(blk); @@ -4008,7 +3981,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { _ => false, } } - hir::StmtExpr(_, _) | hir::StmtSemi(_, _) => true, + hir::StmtExpr(..) | hir::StmtSemi(..) 
=> true, } { self.tcx .sess @@ -4024,13 +3997,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { s_ty.is_never(); any_err = any_err || s_ty.references_error(); } - match blk.expr { + let ty = match blk.expr { None => if any_err { - self.write_error(blk.id); + self.tcx.types.err } else if any_diverges { - self.write_ty(blk.id, self.next_diverging_ty_var()); + self.next_diverging_ty_var() } else { - self.write_nil(blk.id); + self.tcx.mk_nil() }, Some(ref e) => { if any_diverges && !warned { @@ -4047,72 +4020,38 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ety } _ => { - self.check_expr_with_expectation(&e, expected); - self.expr_ty(&e) + self.check_expr_with_expectation(&e, expected) } }; if any_err { - self.write_error(blk.id); + self.tcx.types.err } else if any_diverges { - self.write_ty(blk.id, self.next_diverging_ty_var()); + self.next_diverging_ty_var() } else { - self.write_ty(blk.id, ety); + ety } } }; + self.write_ty(blk.id, ty); *self.ps.borrow_mut() = prev; - } - - // Returns the type parameter count and the type for the given definition. - fn type_scheme_and_predicates_for_def(&self, - sp: Span, - defn: Def) - -> (TypeScheme<'tcx>, GenericPredicates<'tcx>) { - match defn { - Def::Local(_, nid) | Def::Upvar(_, nid, _, _) => { - let typ = self.local_ty(sp, nid); - (ty::TypeScheme { generics: ty::Generics::empty(), ty: typ }, - ty::GenericPredicates::empty()) - } - Def::Fn(id) | Def::Method(id) | - Def::Static(id, _) | Def::Variant(_, id) | - Def::Struct(id) | Def::Const(id) | Def::AssociatedConst(id) => { - (self.tcx.lookup_item_type(id), self.tcx.lookup_predicates(id)) - } - Def::Trait(_) | - Def::Enum(..) | - Def::TyAlias(..) | - Def::AssociatedTy(..) | - Def::PrimTy(_) | - Def::TyParam(..) | - Def::Mod(..) | - Def::ForeignMod(..) | - Def::Label(..) | - Def::SelfTy(..) | - Def::Err => { - span_bug!(sp, "expected value, found {:?}", defn); - } - } + ty } // Instantiates the given path, which must refer to an item with the given // number of type parameters and type. pub fn instantiate_value_path(&self, segments: &[hir::PathSegment], - type_scheme: TypeScheme<'tcx>, - type_predicates: &ty::GenericPredicates<'tcx>, opt_self_ty: Option>, def: Def, span: Span, node_id: ast::NodeId) -> Ty<'tcx> { - debug!("instantiate_value_path(path={:?}, def={:?}, node_id={}, type_scheme={:?})", + debug!("instantiate_value_path(path={:?}, def={:?}, node_id={})", segments, def, - node_id, - type_scheme); + node_id); // We need to extract the type parameters supplied by the user in // the path `path`. Due to the current setup, this is a bit of a @@ -4186,54 +4125,38 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { assert!(!segments.is_empty()); let mut ufcs_associated = None; - let mut segment_spaces: Vec<_>; + let mut type_segment = None; + let mut fn_segment = None; match def { // Case 1 and 1b. Reference to a *type* or *enum variant*. - Def::SelfTy(..) | - Def::Struct(..) | - Def::Variant(..) | - Def::Enum(..) | - Def::TyAlias(..) | - Def::AssociatedTy(..) | - Def::Trait(..) | - Def::PrimTy(..) | - Def::TyParam(..) => { + Def::Struct(def_id) | + Def::Union(def_id) | + Def::Variant(def_id) | + Def::Enum(def_id) | + Def::TyAlias(def_id) | + Def::AssociatedTy(def_id) | + Def::Trait(def_id) => { // Everything but the final segment should have no // parameters at all. 
- segment_spaces = vec![None; segments.len() - 1]; - segment_spaces.push(Some(subst::TypeSpace)); + let mut generics = self.tcx.lookup_generics(def_id); + if let Some(def_id) = generics.parent { + // Variant and struct constructors use the + // generics of their parent type definition. + generics = self.tcx.lookup_generics(def_id); + } + type_segment = Some((segments.last().unwrap(), generics)); } // Case 2. Reference to a top-level value. - Def::Fn(..) | - Def::Const(..) | - Def::Static(..) => { - segment_spaces = vec![None; segments.len() - 1]; - segment_spaces.push(Some(subst::FnSpace)); - } - - // Case 3. Reference to a method. - Def::Method(def_id) => { - let container = self.tcx.impl_or_trait_item(def_id).container(); - match container { - ty::TraitContainer(trait_did) => { - callee::check_legal_trait_for_method_call(self.ccx, span, trait_did) - } - ty::ImplContainer(_) => {} - } - - if segments.len() >= 2 { - segment_spaces = vec![None; segments.len() - 2]; - segment_spaces.push(Some(subst::TypeSpace)); - segment_spaces.push(Some(subst::FnSpace)); - } else { - // `::method` will end up here, and so can `T::method`. - let self_ty = opt_self_ty.expect("UFCS sugared method missing Self"); - segment_spaces = vec![Some(subst::FnSpace)]; - ufcs_associated = Some((container, self_ty)); - } + Def::Fn(def_id) | + Def::Const(def_id) | + Def::Static(def_id, _) => { + fn_segment = Some((segments.last().unwrap(), + self.tcx.lookup_generics(def_id))); } + // Case 3. Reference to a method or associated const. + Def::Method(def_id) | Def::AssociatedConst(def_id) => { let container = self.tcx.impl_or_trait_item(def_id).container(); match container { @@ -4243,69 +4166,58 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ty::ImplContainer(_) => {} } + let generics = self.tcx.lookup_generics(def_id); if segments.len() >= 2 { - segment_spaces = vec![None; segments.len() - 2]; - segment_spaces.push(Some(subst::TypeSpace)); - segment_spaces.push(None); + let parent_generics = self.tcx.lookup_generics(generics.parent.unwrap()); + type_segment = Some((&segments[segments.len() - 2], parent_generics)); } else { - // `::CONST` will end up here, and so can `T::CONST`. - let self_ty = opt_self_ty.expect("UFCS sugared const missing Self"); - segment_spaces = vec![None]; + // `::assoc` will end up here, and so can `T::assoc`. + let self_ty = opt_self_ty.expect("UFCS sugared assoc missing Self"); ufcs_associated = Some((container, self_ty)); } + fn_segment = Some((segments.last().unwrap(), generics)); } // Other cases. Various nonsense that really shouldn't show up // here. If they do, an error will have been reported // elsewhere. (I hope) Def::Mod(..) | - Def::ForeignMod(..) | + Def::PrimTy(..) | + Def::SelfTy(..) | + Def::TyParam(..) | Def::Local(..) | Def::Label(..) | - Def::Upvar(..) => { - segment_spaces = vec![None; segments.len()]; - } - - Def::Err => { - self.set_tainted_by_errors(); - segment_spaces = vec![None; segments.len()]; - } + Def::Upvar(..) | + Def::Err => {} } - assert_eq!(segment_spaces.len(), segments.len()); // In `>::method`, `A` and `B` are mandatory, but // `opt_self_ty` can also be Some for `Foo::method`, where Foo's // type parameters are not mandatory. let require_type_space = opt_self_ty.is_some() && ufcs_associated.is_none(); - debug!("segment_spaces={:?}", segment_spaces); - - // Next, examine the definition, and determine how many type - // parameters we expect from each space. 
- let type_defs = &type_scheme.generics.types; - let region_defs = &type_scheme.generics.regions; + debug!("type_segment={:?} fn_segment={:?}", type_segment, fn_segment); // Now that we have categorized what space the parameters for each // segment belong to, let's sort out the parameters that the user // provided (if any) into their appropriate spaces. We'll also report // errors if type parameters are provided in an inappropriate place. - let mut substs = Substs::empty(); - for (&opt_space, segment) in segment_spaces.iter().zip(segments) { - if let Some(space) = opt_space { - self.push_explicit_parameters_from_segment_to_substs(space, - span, - type_defs, - region_defs, - segment, - &mut substs); - } else { - self.tcx.prohibit_type_params(slice::ref_slice(segment)); - } - } - if let Some(self_ty) = opt_self_ty { - if type_defs.len(subst::SelfSpace) == 1 { - substs.types.push(subst::SelfSpace, self_ty); + let poly_segments = type_segment.is_some() as usize + + fn_segment.is_some() as usize; + self.tcx.prohibit_type_params(&segments[..segments.len() - poly_segments]); + + match def { + Def::Local(def_id) | Def::Upvar(def_id, ..) => { + let nid = self.tcx.map.as_local_node_id(def_id).unwrap(); + let ty = self.local_ty(span, nid); + let ty = self.normalize_associated_types_in(span, &ty); + self.write_ty(node_id, ty); + self.write_substs(node_id, ty::ItemSubsts { + substs: Substs::empty(self.tcx) + }); + return ty; } + _ => {} } // Now we have to compare the types that the user *actually* @@ -4314,20 +4226,89 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // variables. If the user provided some types, we may still need // to add defaults. If the user provided *too many* types, that's // a problem. - for &space in &[subst::SelfSpace, subst::TypeSpace, subst::FnSpace] { - self.adjust_type_parameters(span, space, type_defs, - require_type_space, &mut substs); - assert_eq!(substs.types.len(space), type_defs.len(space)); + self.check_path_parameter_count(span, !require_type_space, &mut type_segment); + self.check_path_parameter_count(span, true, &mut fn_segment); - self.adjust_region_parameters(span, space, region_defs, &mut substs); - assert_eq!(substs.regions.len(space), region_defs.len(space)); - } + let (fn_start, has_self) = match (type_segment, fn_segment) { + (_, Some((_, generics))) => { + (generics.parent_count(), generics.has_self) + } + (Some((_, generics)), None) => { + (generics.own_count(), generics.has_self) + } + (None, None) => (0, false) + }; + let substs = Substs::for_item(self.tcx, def.def_id(), |def, _| { + let mut i = def.index as usize; + + let segment = if i < fn_start { + i -= has_self as usize; + type_segment + } else { + i -= fn_start; + fn_segment + }; + let lifetimes = match segment.map(|(s, _)| &s.parameters) { + Some(&hir::AngleBracketedParameters(ref data)) => &data.lifetimes[..], + Some(&hir::ParenthesizedParameters(_)) => bug!(), + None => &[] + }; + + if let Some(ast_lifetime) = lifetimes.get(i) { + ast_region_to_region(self.tcx, ast_lifetime) + } else { + self.region_var_for_def(span, def) + } + }, |def, substs| { + let mut i = def.index as usize; + + let can_omit = i >= fn_start || !require_type_space; + let segment = if i < fn_start { + // Handle Self first, so we can adjust the index to match the AST. 
+ if has_self && i == 0 { + return opt_self_ty.unwrap_or_else(|| { + self.type_var_for_def(span, def, substs) + }); + } + i -= has_self as usize; + type_segment + } else { + i -= fn_start; + fn_segment + }; + let types = match segment.map(|(s, _)| &s.parameters) { + Some(&hir::AngleBracketedParameters(ref data)) => &data.types[..], + Some(&hir::ParenthesizedParameters(_)) => bug!(), + None => &[] + }; + + // Skip over the lifetimes in the same segment. + if let Some((_, generics)) = segment { + i -= generics.regions.len(); + } + + let omitted = can_omit && types.is_empty(); + if let Some(ast_ty) = types.get(i) { + // A provided type parameter. + self.to_ty(ast_ty) + } else if let (false, Some(default)) = (omitted, def.default) { + // No type parameter provided, but a default exists. + default.subst_spanned(self.tcx, substs, Some(span)) + } else { + // No type parameters were provided, we can infer all. + // This can also be reached in some error cases: + // We prefer to use inference variables instead of + // TyError to let type inference recover somewhat. + self.type_var_for_def(span, def, substs) + } + }); // The things we are substituting into the type should not contain // escaping late-bound regions, and nor should the base type scheme. - let substs = self.tcx.mk_substs(substs); - assert!(!substs.has_regions_escaping_depth(0)); - assert!(!type_scheme.has_escaping_regions()); + let scheme = self.tcx.lookup_item_type(def.def_id()); + let type_predicates = self.tcx.lookup_predicates(def.def_id()); + assert!(!substs.has_escaping_regions()); + assert!(!scheme.ty.has_escaping_regions()); // Add all the obligations that are required, substituting and // normalized appropriately. @@ -4338,7 +4319,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Substitute the values for the type parameters into the type of // the referenced item. - let ty_substituted = self.instantiate_type_scheme(span, &substs, &type_scheme.ty); + let ty_substituted = self.instantiate_type_scheme(span, &substs, &scheme.ty); if let Some((ty::ImplContainer(impl_def_id), self_ty)) = ufcs_associated { @@ -4347,10 +4328,6 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // type parameters, which we can infer by unifying the provided `Self` // with the substituted impl type. let impl_scheme = self.tcx.lookup_item_type(impl_def_id); - assert_eq!(substs.types.len(subst::TypeSpace), - impl_scheme.generics.types.len(subst::TypeSpace)); - assert_eq!(substs.regions.len(subst::TypeSpace), - impl_scheme.generics.regions.len(subst::TypeSpace)); let impl_ty = self.instantiate_type_scheme(span, &substs, &impl_scheme.ty); match self.sub_types(false, TypeOrigin::Misc(span), self_ty, impl_ty) { @@ -4370,250 +4347,92 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { debug!("instantiate_value_path: type of {:?} is {:?}", node_id, ty_substituted); - self.write_ty(node_id, ty_substituted); self.write_substs(node_id, ty::ItemSubsts { substs: substs }); ty_substituted } - /// Finds the parameters that the user provided and adds them to `substs`. If too many - /// parameters are provided, then reports an error and clears the output vector. - /// - /// We clear the output vector because that will cause the `adjust_XXX_parameters()` later to - /// use inference variables. This seems less likely to lead to derived errors. - /// - /// Note that we *do not* check for *too few* parameters here. Due to the presence of defaults - /// etc that is more complicated. 
I wanted however to do the reporting of *too many* parameters - /// here because we can easily use the precise span of the N+1'th parameter. - fn push_explicit_parameters_from_segment_to_substs(&self, - space: subst::ParamSpace, - span: Span, - type_defs: &VecPerParamSpace>, - region_defs: &VecPerParamSpace, - segment: &hir::PathSegment, - substs: &mut Substs<'tcx>) - { - match segment.parameters { - hir::AngleBracketedParameters(ref data) => { - self.push_explicit_angle_bracketed_parameters_from_segment_to_substs( - space, type_defs, region_defs, data, substs); + /// Report errors if the provided parameters are too few or too many. + fn check_path_parameter_count(&self, + span: Span, + can_omit: bool, + segment: &mut Option<(&hir::PathSegment, &ty::Generics)>) { + let (lifetimes, types, bindings) = match segment.map(|(s, _)| &s.parameters) { + Some(&hir::AngleBracketedParameters(ref data)) => { + (&data.lifetimes[..], &data.types[..], &data.bindings[..]) } - - hir::ParenthesizedParameters(ref data) => { - span_err!(self.tcx.sess, span, E0238, - "parenthesized parameters may only be used with a trait"); - self.push_explicit_parenthesized_parameters_from_segment_to_substs( - space, span, type_defs, data, substs); + Some(&hir::ParenthesizedParameters(_)) => { + span_bug!(span, "parenthesized parameters cannot appear in ExprPath"); } - } - } + None => (&[][..], &[][..], &[][..]) + }; - fn push_explicit_angle_bracketed_parameters_from_segment_to_substs(&self, - space: subst::ParamSpace, - type_defs: &VecPerParamSpace>, - region_defs: &VecPerParamSpace, - data: &hir::AngleBracketedParameterData, - substs: &mut Substs<'tcx>) - { - { - let type_count = type_defs.len(space); - assert_eq!(substs.types.len(space), 0); - for (i, typ) in data.types.iter().enumerate() { - let t = self.to_ty(&typ); - if i < type_count { - substs.types.push(space, t); - } else if i == type_count { - struct_span_err!(self.tcx.sess, typ.span, E0087, - "too many type parameters provided: \ - expected at most {} parameter{}, \ - found {} parameter{}", - type_count, - if type_count == 1 {""} else {"s"}, - data.types.len(), - if data.types.len() == 1 {""} else {"s"}) - .span_label(typ.span , &format!("expected {} parameter{}", - type_count, - if type_count == 1 {""} else {"s"})).emit(); - substs.types.truncate(space, 0); - break; - } - } - } + let count = |n| { + format!("{} parameter{}", n, if n == 1 { "" } else { "s" }) + }; - if !data.bindings.is_empty() { - span_err!(self.tcx.sess, data.bindings[0].span, E0182, - "unexpected binding of associated item in expression path \ - (only allowed in type paths)"); + // Check provided lifetime parameters. 
+ let lifetime_defs = segment.map_or(&[][..], |(_, generics)| &generics.regions); + if lifetimes.len() > lifetime_defs.len() { + let span = lifetimes[lifetime_defs.len()].span; + span_err!(self.tcx.sess, span, E0088, + "too many lifetime parameters provided: \ + expected {}, found {}", + count(lifetime_defs.len()), + count(lifetimes.len())); + } else if lifetimes.len() > 0 && lifetimes.len() < lifetime_defs.len() { + span_err!(self.tcx.sess, span, E0090, + "too few lifetime parameters provided: \ + expected {}, found {}", + count(lifetime_defs.len()), + count(lifetimes.len())); } - { - let region_count = region_defs.len(space); - assert_eq!(substs.regions.len(space), 0); - for (i, lifetime) in data.lifetimes.iter().enumerate() { - let r = ast_region_to_region(self.tcx, lifetime); - if i < region_count { - substs.regions.push(space, r); - } else if i == region_count { - span_err!(self.tcx.sess, lifetime.span, E0088, - "too many lifetime parameters provided: \ - expected {} parameter{}, found {} parameter{}", - region_count, - if region_count == 1 {""} else {"s"}, - data.lifetimes.len(), - if data.lifetimes.len() == 1 {""} else {"s"}); - substs.regions.truncate(space, 0); - break; - } + // Check provided type parameters. + let type_defs = segment.map_or(&[][..], |(_, generics)| { + if generics.parent.is_none() { + &generics.types[generics.has_self as usize..] + } else { + &generics.types } - } - } - - /// As with - /// `push_explicit_angle_bracketed_parameters_from_segment_to_substs`, - /// but intended for `Foo(A,B) -> C` form. This expands to - /// roughly the same thing as `Foo<(A,B),C>`. One important - /// difference has to do with the treatment of anonymous - /// regions, which are translated into bound regions (NYI). - fn push_explicit_parenthesized_parameters_from_segment_to_substs(&self, - space: subst::ParamSpace, - span: Span, - type_defs: &VecPerParamSpace>, - data: &hir::ParenthesizedParameterData, - substs: &mut Substs<'tcx>) - { - let type_count = type_defs.len(space); - if type_count < 2 { - span_err!(self.tcx.sess, span, E0167, - "parenthesized form always supplies 2 type parameters, \ - but only {} parameter(s) were expected", - type_count); - } - - let input_tys: Vec = - data.inputs.iter().map(|ty| self.to_ty(&ty)).collect(); - - let tuple_ty = self.tcx.mk_tup(input_tys); - - if type_count >= 1 { - substs.types.push(space, tuple_ty); - } - - let output_ty: Option = - data.output.as_ref().map(|ty| self.to_ty(&ty)); - - let output_ty = - output_ty.unwrap_or(self.tcx.mk_nil()); - - if type_count >= 2 { - substs.types.push(space, output_ty); - } - } - - fn adjust_type_parameters(&self, - span: Span, - space: ParamSpace, - defs: &VecPerParamSpace>, - require_type_space: bool, - substs: &mut Substs<'tcx>) - { - let provided_len = substs.types.len(space); - let desired = defs.get_slice(space); - let required_len = desired.iter() - .take_while(|d| d.default.is_none()) - .count(); - - debug!("adjust_type_parameters(space={:?}, \ - provided_len={}, \ - desired_len={}, \ - required_len={})", - space, - provided_len, - desired.len(), - required_len); - - // Enforced by `push_explicit_parameters_from_segment_to_substs()`. - assert!(provided_len <= desired.len()); - - // Nothing specified at all: supply inference variables for - // everything. 
- if provided_len == 0 && !(require_type_space && space == subst::TypeSpace) { - substs.types.replace(space, Vec::new()); - self.type_vars_for_defs(span, space, substs, &desired[..]); - return; - } - - // Too few parameters specified: report an error and use Err - // for everything. - if provided_len < required_len { - let qualifier = - if desired.len() != required_len { "at least " } else { "" }; - span_err!(self.tcx.sess, span, E0089, - "too few type parameters provided: expected {}{} parameter{}, \ - found {} parameter{}", - qualifier, required_len, - if required_len == 1 {""} else {"s"}, - provided_len, - if provided_len == 1 {""} else {"s"}); - substs.types.replace(space, vec![self.tcx.types.err; desired.len()]); - return; - } - - // Otherwise, add in any optional parameters that the user - // omitted. The case of *too many* parameters is handled - // already by - // push_explicit_parameters_from_segment_to_substs(). Note - // that the *default* type are expressed in terms of all prior - // parameters, so we have to substitute as we go with the - // partial substitution that we have built up. - for i in provided_len..desired.len() { - let default = desired[i].default.unwrap(); - let default = default.subst_spanned(self.tcx, substs, Some(span)); - substs.types.push(space, default); - } - assert_eq!(substs.types.len(space), desired.len()); - - debug!("Final substs: {:?}", substs); - } - - fn adjust_region_parameters(&self, - span: Span, - space: ParamSpace, - defs: &VecPerParamSpace<ty::RegionParameterDef>, - substs: &mut Substs<'tcx>) - { - let provided_len = substs.regions.len(space); - let desired = defs.get_slice(space); - - // Enforced by `push_explicit_parameters_from_segment_to_substs()`. - assert!(provided_len <= desired.len()); - - // If nothing was provided, just use inference variables. - if provided_len == 0 { - substs.regions.replace( - space, - self.region_vars_for_defs(span, desired)); - return; + }); + let required_len = type_defs.iter() + .take_while(|d| d.default.is_none()) + .count(); + if types.len() > type_defs.len() { + let span = types[type_defs.len()].span; + struct_span_err!(self.tcx.sess, span, E0087, + "too many type parameters provided: \ + expected at most {}, found {}", + count(type_defs.len()), + count(types.len())) + .span_label(span, &format!("too many type parameters")).emit(); + + // To prevent derived errors to accumulate due to extra + // type parameters, we force instantiate_value_path to + // use inference variables instead of the provided types. + *segment = None; + } else if !(can_omit && types.len() == 0) && types.len() < required_len { + let adjust = |len| if len > 1 { "parameters" } else { "parameter" }; + let required_param_str = adjust(required_len); + let actual_param_str = adjust(types.len()); + struct_span_err!(self.tcx.sess, span, E0089, + "too few type parameters provided: \ + expected {} {}, found {} {}", + count(required_len), + required_param_str, + count(types.len()), + actual_param_str) + .span_label(span, &format!("expected {} type {}", required_len, required_param_str)) + .emit(); } - // If just the right number were provided, everybody is happy. - if provided_len == desired.len() { - return; + if !bindings.is_empty() { + span_err!(self.tcx.sess, bindings[0].span, E0182, + "unexpected binding of associated item in expression path \ + (only allowed in type paths)"); } - - // Otherwise, too few were provided. Report an error and then - // use inference variables.
- span_err!(self.tcx.sess, span, E0090, - "too few lifetime parameters provided: expected {} parameter{}, \ - found {} parameter{}", - desired.len(), - if desired.len() == 1 {""} else {"s"}, - provided_len, - if provided_len == 1 {""} else {"s"}); - - substs.regions.replace( - space, - self.region_vars_for_defs(span, desired)); } fn structurally_resolve_type_or_else(&self, sp: Span, ty: Ty<'tcx>, f: F) @@ -4674,28 +4493,28 @@ pub fn may_break(tcx: TyCtxt, id: ast::NodeId, b: &hir::Block) -> bool { } pub fn check_bounds_are_used<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - tps: &[hir::TyParam], + generics: &hir::Generics, ty: Ty<'tcx>) { debug!("check_bounds_are_used(n_tps={}, ty={:?})", - tps.len(), ty); + generics.ty_params.len(), ty); // make a vector of booleans initially false, set to true when used - if tps.is_empty() { return; } - let mut tps_used = vec![false; tps.len()]; + if generics.ty_params.is_empty() { return; } + let mut tps_used = vec![false; generics.ty_params.len()]; for leaf_ty in ty.walk() { if let ty::TyParam(ParamTy {idx, ..}) = leaf_ty.sty { debug!("Found use of ty param num {}", idx); - tps_used[idx as usize] = true; + tps_used[idx as usize - generics.lifetimes.len()] = true; } } - for (i, b) in tps_used.iter().enumerate() { - if !*b { - struct_span_err!(ccx.tcx.sess, tps[i].span, E0091, + for (&used, param) in tps_used.iter().zip(&generics.ty_params) { + if !used { + struct_span_err!(ccx.tcx.sess, param.span, E0091, "type parameter `{}` is unused", - tps[i].name) - .span_label(tps[i].span, &format!("unused type parameter")) + param.name) + .span_label(param.span, &format!("unused type parameter")) .emit(); } } diff --git a/src/librustc_typeck/check/op.rs b/src/librustc_typeck/check/op.rs index a8b1683f6d..411bd7e7b5 100644 --- a/src/librustc_typeck/check/op.rs +++ b/src/librustc_typeck/check/op.rs @@ -23,21 +23,22 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { expr: &'gcx hir::Expr, op: hir::BinOp, lhs_expr: &'gcx hir::Expr, - rhs_expr: &'gcx hir::Expr) + rhs_expr: &'gcx hir::Expr) -> Ty<'tcx> { - self.check_expr_with_lvalue_pref(lhs_expr, PreferMutLvalue); + let lhs_ty = self.check_expr_with_lvalue_pref(lhs_expr, PreferMutLvalue); - let lhs_ty = self.resolve_type_vars_with_obligations(self.expr_ty(lhs_expr)); + let lhs_ty = self.resolve_type_vars_with_obligations(lhs_ty); let (rhs_ty, return_ty) = self.check_overloaded_binop(expr, lhs_expr, lhs_ty, rhs_expr, op, IsAssign::Yes); let rhs_ty = self.resolve_type_vars_with_obligations(rhs_ty); - if !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() && is_builtin_binop(lhs_ty, rhs_ty, op) { + let ty = if !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() + && is_builtin_binop(lhs_ty, rhs_ty, op) { self.enforce_builtin_binop_types(lhs_expr, lhs_ty, rhs_expr, rhs_ty, op); - self.write_nil(expr.id); + self.tcx.mk_nil() } else { - self.write_ty(expr.id, return_ty); - } + return_ty + }; let tcx = self.tcx; if !tcx.expr_is_lval(lhs_expr) { @@ -49,6 +50,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { &format!("invalid expression for left-hand side")) .emit(); } + ty } /// Check a potentially overloaded binary operator. 
@@ -56,7 +58,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { expr: &'gcx hir::Expr, op: hir::BinOp, lhs_expr: &'gcx hir::Expr, - rhs_expr: &'gcx hir::Expr) + rhs_expr: &'gcx hir::Expr) -> Ty<'tcx> { let tcx = self.tcx; @@ -67,15 +69,15 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { lhs_expr, rhs_expr); - self.check_expr(lhs_expr); - let lhs_ty = self.resolve_type_vars_with_obligations(self.expr_ty(lhs_expr)); + let lhs_ty = self.check_expr(lhs_expr); + let lhs_ty = self.resolve_type_vars_with_obligations(lhs_ty); match BinOpCategory::from(op) { BinOpCategory::Shortcircuit => { // && and || are a simple case. self.demand_suptype(lhs_expr.span, tcx.mk_bool(), lhs_ty); self.check_expr_coercable_to_type(rhs_expr, tcx.mk_bool()); - self.write_ty(expr.id, tcx.mk_bool()); + tcx.mk_bool() } _ => { // Otherwise, we always treat operators as if they are @@ -107,7 +109,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.demand_suptype(expr.span, builtin_return_ty, return_ty); } - self.write_ty(expr.id, return_ty); + return_ty } } } diff --git a/src/librustc_typeck/check/regionck.rs b/src/librustc_typeck/check/regionck.rs index 22ffcfbaae..939deee27c 100644 --- a/src/librustc_typeck/check/regionck.rs +++ b/src/librustc_typeck/check/regionck.rs @@ -169,7 +169,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub struct RegionCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { pub fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, - region_bound_pairs: Vec<(ty::Region, GenericKind<'tcx>)>, + region_bound_pairs: Vec<(&'tcx ty::Region, GenericKind<'tcx>)>, free_region_map: FreeRegionMap, @@ -324,9 +324,10 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { let call_site_scope = self.call_site_scope.unwrap(); debug!("visit_fn_body body.id {} call_site_scope: {:?}", body.id, call_site_scope); + let call_site_region = self.tcx.mk_region(ty::ReScope(call_site_scope)); self.type_of_node_must_outlive(infer::CallReturn(span), body.id, - ty::ReScope(call_site_scope)); + call_site_region); self.region_bound_pairs.truncate(old_region_bounds_pairs_len); @@ -407,8 +408,8 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { for implication in implied_bounds { debug!("implication: {:?}", implication); match implication { - ImpliedBound::RegionSubRegion(ty::ReFree(free_a), - ty::ReVar(vid_b)) => { + ImpliedBound::RegionSubRegion(&ty::ReFree(free_a), + &ty::ReVar(vid_b)) => { self.add_given(free_a, vid_b); } ImpliedBound::RegionSubParam(r_a, param_b) => { @@ -476,9 +477,10 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { // variable's type enclose at least the variable's scope. let var_scope = tcx.region_maps.var_scope(id); + let var_region = self.tcx.mk_region(ty::ReScope(var_scope)); let origin = infer::BindingTypeIsNotValidAtDecl(span); - self.type_of_node_must_outlive(origin, id, ty::ReScope(var_scope)); + self.type_of_node_must_outlive(origin, id, var_region); let typ = self.resolve_node_type(id); dropck::check_safety_of_destructor_if_necessary(self, typ, span, var_scope); @@ -525,7 +527,7 @@ impl<'a, 'gcx, 'tcx, 'v> Visitor<'v> for RegionCtxt<'a, 'gcx, 'tcx> { // scope of that expression. This also guarantees basic WF. 
let expr_ty = self.resolve_node_type(expr.id); // the region corresponding to this expression - let expr_region = ty::ReScope(self.tcx.region_maps.node_extent(expr.id)); + let expr_region = self.tcx.node_scope_region(expr.id); self.type_must_outlive(infer::ExprTypeIsNotInScope(expr_ty, expr.span), expr_ty, expr_region); @@ -574,7 +576,7 @@ impl<'a, 'gcx, 'tcx, 'v> Visitor<'v> for RegionCtxt<'a, 'gcx, 'tcx> { } } /* - adjustment::AutoObject(_, ref bounds, _, _) => { + adjustment::AutoObject(_, ref bounds, ..) => { // Determine if we are casting `expr` to a trait // instance. If so, we have to be sure that the type // of the source obeys the new region bound. @@ -641,7 +643,7 @@ impl<'a, 'gcx, 'tcx, 'v> Visitor<'v> for RegionCtxt<'a, 'gcx, 'tcx> { intravisit::walk_expr(self, expr); } - hir::ExprMethodCall(_, _, ref args) => { + hir::ExprMethodCall(.., ref args) => { self.constrain_call(expr, Some(&args[0]), args[1..].iter().map(|e| &**e), false); @@ -713,7 +715,7 @@ impl<'a, 'gcx, 'tcx, 'v> Visitor<'v> for RegionCtxt<'a, 'gcx, 'tcx> { None => self.resolve_node_type(base.id) }; if let ty::TyRef(r_ptr, _) = base_ty.sty { - self.mk_subregion_due_to_dereference(expr.span, expr_region, *r_ptr); + self.mk_subregion_due_to_dereference(expr.span, expr_region, r_ptr); } intravisit::walk_expr(self, expr); @@ -756,7 +758,7 @@ impl<'a, 'gcx, 'tcx, 'v> Visitor<'v> for RegionCtxt<'a, 'gcx, 'tcx> { intravisit::walk_expr(self, expr); } - hir::ExprClosure(_, _, ref body, _) => { + hir::ExprClosure(.., ref body, _) => { self.check_expr_fn_block(expr, &body); } @@ -780,9 +782,10 @@ impl<'a, 'gcx, 'tcx, 'v> Visitor<'v> for RegionCtxt<'a, 'gcx, 'tcx> { let call_site_scope = self.call_site_scope; debug!("visit_expr ExprRet ret_expr.id {} call_site_scope: {:?}", ret_expr.id, call_site_scope); + let call_site_region = self.tcx.mk_region(ty::ReScope(call_site_scope.unwrap())); self.type_of_node_must_outlive(infer::CallReturn(ret_expr.span), ret_expr.id, - ty::ReScope(call_site_scope.unwrap())); + call_site_region); intravisit::walk_expr(self, expr); } @@ -819,16 +822,16 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { /*From:*/ (&ty::TyRef(from_r, ref from_mt), /*To: */ &ty::TyRef(to_r, ref to_mt)) => { // Target cannot outlive source, naturally. - self.sub_regions(infer::Reborrow(cast_expr.span), *to_r, *from_r); + self.sub_regions(infer::Reborrow(cast_expr.span), to_r, from_r); self.walk_cast(cast_expr, from_mt.ty, to_mt.ty); } /*From:*/ (_, - /*To: */ &ty::TyTrait(box ty::TraitTy { ref bounds, .. })) => { + /*To: */ &ty::TyTrait(ref obj)) => { // When T is existentially quantified as a trait // `Foo+'to`, it must outlive the region bound `'to`. 
self.type_must_outlive(infer::RelateObjectBound(cast_expr.span), - from_ty, bounds.region_bound); + from_ty, obj.region_bound); } /*From:*/ (&ty::TyBox(from_referent_ty), @@ -889,7 +892,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { // // FIXME(#6268) to support nested method calls, should be callee_id let callee_scope = self.tcx.region_maps.node_extent(call_expr.id); - let callee_region = ty::ReScope(callee_scope); + let callee_region = self.tcx.mk_region(ty::ReScope(callee_scope)); debug!("callee_region={:?}", callee_region); @@ -933,8 +936,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { derefs, derefd_ty); - let s_deref_expr = self.tcx.region_maps.node_extent(deref_expr.id); - let r_deref_expr = ty::ReScope(s_deref_expr); + let r_deref_expr = self.tcx.node_scope_region(deref_expr.id); for i in 0..derefs { let method_call = MethodCall::autoderef(deref_expr.id, i as u32); debug!("constrain_autoderefs: method_call={:?} (of {:?} total)", method_call, derefs); @@ -989,7 +991,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { if let ty::TyRef(r_ptr, _) = derefd_ty.sty { self.mk_subregion_due_to_dereference(deref_expr.span, - r_deref_expr, *r_ptr); + r_deref_expr, r_ptr); } match derefd_ty.builtin_deref(true, ty::NoPreference) { @@ -1003,8 +1005,8 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { pub fn mk_subregion_due_to_dereference(&mut self, deref_span: Span, - minimum_lifetime: ty::Region, - maximum_lifetime: ty::Region) { + minimum_lifetime: &'tcx ty::Region, + maximum_lifetime: &'tcx ty::Region) { self.sub_regions(infer::DerefPointer(deref_span), minimum_lifetime, maximum_lifetime) } @@ -1014,7 +1016,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { span: Span) { match cmt.cat { Categorization::Rvalue(region) => { - match region { + match *region { ty::ReScope(rvalue_scope) => { let typ = self.resolve_type(cmt.ty); dropck::check_safety_of_destructor_if_necessary(self, @@ -1023,7 +1025,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { rvalue_scope); } ty::ReStatic => {} - region => { + _ => { span_bug!(span, "unexpected rvalue region in rvalue \ destructor safety checking: `{:?}`", @@ -1049,7 +1051,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { match mt.ty.sty { ty::TySlice(_) | ty::TyStr => { self.sub_regions(infer::IndexSlice(index_expr.span), - r_index_expr, *r_ptr); + self.tcx.mk_region(r_index_expr), r_ptr); } _ => {} } @@ -1061,7 +1063,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { fn type_of_node_must_outlive(&mut self, origin: infer::SubregionOrigin<'tcx>, id: ast::NodeId, - minimum_lifetime: ty::Region) + minimum_lifetime: &'tcx ty::Region) { let tcx = self.tcx; @@ -1132,7 +1134,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { let mc = mc::MemCategorizationContext::new(self); for arg in args { let arg_ty = self.node_ty(arg.id); - let re_scope = ty::ReScope(body_scope); + let re_scope = self.tcx.mk_region(ty::ReScope(body_scope)); let arg_cmt = mc.cat_rvalue(arg.id, arg.ty.span, re_scope, arg_ty); debug!("arg_ty={:?} arg_cmt={:?} arg={:?}", arg_ty, @@ -1154,7 +1156,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { let _ = mc.cat_pattern(discr_cmt, root_pat, |_, sub_cmt, sub_pat| { match sub_pat.node { // `ref x` pattern - PatKind::Binding(hir::BindByRef(mutbl), _, _) => { + PatKind::Binding(hir::BindByRef(mutbl), ..) 
=> { self.link_region_from_node_type(sub_pat.span, sub_pat.id, mutbl, sub_cmt); } @@ -1168,7 +1170,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { fn link_autoref(&self, expr: &hir::Expr, autoderefs: usize, - autoref: &adjustment::AutoRef) + autoref: &adjustment::AutoRef<'tcx>) { debug!("link_autoref(autoref={:?})", autoref); let mc = mc::MemCategorizationContext::new(self); @@ -1182,8 +1184,8 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { } adjustment::AutoUnsafe(m) => { - let r = ty::ReScope(self.tcx.region_maps.node_extent(expr.id)); - self.link_region(expr.span, &r, ty::BorrowKind::from_mutbl(m), expr_cmt); + let r = self.tcx.node_scope_region(expr.id); + self.link_region(expr.span, r, ty::BorrowKind::from_mutbl(m), expr_cmt); } } } @@ -1197,8 +1199,8 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { expr, callee_scope); let mc = mc::MemCategorizationContext::new(self); let expr_cmt = ignore_err!(mc.cat_expr(expr)); - let borrow_region = ty::ReScope(callee_scope); - self.link_region(expr.span, &borrow_region, ty::ImmBorrow, expr_cmt); + let borrow_region = self.tcx.mk_region(ty::ReScope(callee_scope)); + self.link_region(expr.span, borrow_region, ty::ImmBorrow, expr_cmt); } /// Like `link_region()`, except that the region is extracted from the type of `id`, @@ -1212,9 +1214,9 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { id, mutbl, cmt_borrowed); let rptr_ty = self.resolve_node_type(id); - if let ty::TyRef(&r, _) = rptr_ty.sty { + if let ty::TyRef(r, _) = rptr_ty.sty { debug!("rptr_ty={}", rptr_ty); - self.link_region(span, &r, ty::BorrowKind::from_mutbl(mutbl), + self.link_region(span, r, ty::BorrowKind::from_mutbl(mutbl), cmt_borrowed); } } @@ -1225,14 +1227,14 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { /// between regions, as explained in `link_reborrowed_region()`. fn link_region(&self, span: Span, - borrow_region: &ty::Region, + borrow_region: &'tcx ty::Region, borrow_kind: ty::BorrowKind, borrow_cmt: mc::cmt<'tcx>) { let mut borrow_cmt = borrow_cmt; let mut borrow_kind = borrow_kind; let origin = infer::DataBorrowed(borrow_cmt.ty, span); - self.type_must_outlive(origin, borrow_cmt.ty, *borrow_region); + self.type_must_outlive(origin, borrow_cmt.ty, borrow_region); loop { debug!("link_region(borrow_region={:?}, borrow_kind={:?}, borrow_cmt={:?})", @@ -1267,7 +1269,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { borrow_kind = borrow_kind; } - Categorization::Deref(_, _, mc::UnsafePtr(..)) | + Categorization::Deref(.., mc::UnsafePtr(..)) | Categorization::StaticItem | Categorization::Upvar(..) | Categorization::Local(..) | @@ -1322,10 +1324,10 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { /// recurse and process `ref_cmt` (see case 2 above). 
fn link_reborrowed_region(&self, span: Span, - borrow_region: &ty::Region, + borrow_region: &'tcx ty::Region, borrow_kind: ty::BorrowKind, ref_cmt: mc::cmt<'tcx>, - ref_region: ty::Region, + ref_region: &'tcx ty::Region, mut ref_kind: ty::BorrowKind, note: mc::Note) -> Option<(mc::cmt<'tcx>, ty::BorrowKind)> @@ -1364,7 +1366,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { debug!("link_reborrowed_region: {:?} <= {:?}", borrow_region, ref_region); - self.sub_regions(cause, *borrow_region, ref_region); + self.sub_regions(cause, borrow_region, ref_region); // If we end up needing to recurse and establish a region link // with `ref_cmt`, calculate what borrow kind we will end up @@ -1436,7 +1438,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { origin: infer::ParameterOrigin, substs: &Substs<'tcx>, expr_span: Span, - expr_region: ty::Region) { + expr_region: &'tcx ty::Region) { debug!("substs_wf_in_scope(substs={:?}, \ expr_region={:?}, \ origin={:?}, \ @@ -1445,11 +1447,11 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { let origin = infer::ParameterInScope(origin, expr_span); - for &region in &substs.regions { + for region in substs.regions() { self.sub_regions(origin.clone(), expr_region, region); } - for &ty in &substs.types { + for ty in substs.types() { let ty = self.resolve_type(ty); self.type_must_outlive(origin.clone(), ty, expr_region); } @@ -1461,7 +1463,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { pub fn type_must_outlive(&self, origin: infer::SubregionOrigin<'tcx>, ty: Ty<'tcx>, - region: ty::Region) + region: &'tcx ty::Region) { let ty = self.resolve_type(ty); @@ -1479,7 +1481,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { fn components_must_outlive(&self, origin: infer::SubregionOrigin<'tcx>, components: Vec>, - region: ty::Region) + region: &'tcx ty::Region) { for component in components { let origin = origin.clone(); @@ -1510,7 +1512,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { fn param_ty_must_outlive(&self, origin: infer::SubregionOrigin<'tcx>, - region: ty::Region, + region: &'tcx ty::Region, param_ty: ty::ParamTy) { debug!("param_ty_must_outlive(region={:?}, param_ty={:?}, origin={:?})", region, param_ty, origin); @@ -1522,7 +1524,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { fn projection_must_outlive(&self, origin: infer::SubregionOrigin<'tcx>, - region: ty::Region, + region: &'tcx ty::Region, projection_ty: ty::ProjectionTy<'tcx>) { debug!("projection_must_outlive(region={:?}, projection_ty={:?}, origin={:?})", @@ -1552,7 +1554,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { // If we know that the projection outlives 'static, then we're // done here. - if env_bounds.contains(&ty::ReStatic) { + if env_bounds.contains(&&ty::ReStatic) { debug!("projection_must_outlive: 'static as declared bound"); return; } @@ -1571,18 +1573,15 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { // the problem is to add `T: 'r`, which isn't true. So, if there are no // inference variables, we use a verify constraint instead of adding // edges, which winds up enforcing the same condition.
- let needs_infer = { - projection_ty.trait_ref.substs.types.iter().any(|t| t.needs_infer()) || - projection_ty.trait_ref.substs.regions.iter().any(|r| r.needs_infer()) - }; + let needs_infer = projection_ty.trait_ref.needs_infer(); if env_bounds.is_empty() && needs_infer { debug!("projection_must_outlive: no declared bounds"); - for &component_ty in &projection_ty.trait_ref.substs.types { + for component_ty in projection_ty.trait_ref.substs.types() { self.type_must_outlive(origin.clone(), component_ty, region); } - for &r in &projection_ty.trait_ref.substs.regions { + for r in projection_ty.trait_ref.substs.regions() { self.sub_regions(origin.clone(), region, r); } @@ -1600,10 +1599,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { if !env_bounds.is_empty() && env_bounds[1..].iter().all(|b| *b == env_bounds[0]) { let unique_bound = env_bounds[0]; debug!("projection_must_outlive: unique declared bound = {:?}", unique_bound); - if projection_ty.trait_ref.substs.regions - .iter() - .any(|r| env_bounds.contains(r)) - { + if projection_ty.trait_ref.substs.regions().any(|r| env_bounds.contains(&r)) { debug!("projection_must_outlive: unique declared bound appears in trait ref"); self.sub_regions(origin.clone(), region, unique_bound); return; @@ -1620,7 +1616,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { self.verify_generic_bound(origin, generic.clone(), region, verify_bound); } - fn type_bound(&self, span: Span, ty: Ty<'tcx>) -> VerifyBound { + fn type_bound(&self, span: Span, ty: Ty<'tcx>) -> VerifyBound<'tcx> { match ty.sty { ty::TyParam(p) => { self.param_bound(p) @@ -1635,7 +1631,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { } } - fn param_bound(&self, param_ty: ty::ParamTy) -> VerifyBound { + fn param_bound(&self, param_ty: ty::ParamTy) -> VerifyBound<'tcx> { let param_env = &self.parameter_environment; debug!("param_bound(param_ty={:?})", @@ -1653,7 +1649,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { fn projection_declared_bounds(&self, span: Span, projection_ty: ty::ProjectionTy<'tcx>) - -> Vec<ty::Region> + -> Vec<&'tcx ty::Region> { // First assemble bounds from where clauses and traits. @@ -1668,9 +1664,9 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { fn projection_bound(&self, span: Span, - declared_bounds: Vec<ty::Region>, + declared_bounds: Vec<&'tcx ty::Region>, projection_ty: ty::ProjectionTy<'tcx>) - -> VerifyBound { + -> VerifyBound<'tcx> { debug!("projection_bound(declared_bounds={:?}, projection_ty={:?})", declared_bounds, projection_ty); @@ -1682,7 +1678,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { VerifyBound::AnyRegion(declared_bounds).or(recursive_bound) } - fn recursive_type_bound(&self, span: Span, ty: Ty<'tcx>) -> VerifyBound { + fn recursive_type_bound(&self, span: Span, ty: Ty<'tcx>) -> VerifyBound<'tcx> { let mut bounds = vec![]; for subty in ty.walk_shallow() { @@ -1704,7 +1700,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { } fn declared_generic_bounds_from_env(&self, generic: GenericKind<'tcx>) - -> Vec<ty::Region> + -> Vec<&'tcx ty::Region> { let param_env = &self.parameter_environment; @@ -1738,7 +1734,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { fn declared_projection_bounds_from_trait(&self, span: Span, projection_ty: ty::ProjectionTy<'tcx>) - -> Vec<ty::Region> + -> Vec<&'tcx ty::Region> { debug!("projection_bounds(projection_ty={:?})", projection_ty); @@ -1757,6 +1753,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { // // we can thus deduce that `>::SomeType : 'a`.
let trait_predicates = self.tcx.lookup_predicates(projection_ty.trait_ref.def_id); + assert_eq!(trait_predicates.parent, None); let predicates = trait_predicates.predicates.as_slice().to_vec(); traits::elaborate_predicates(self.tcx, predicates) .filter_map(|predicate| { diff --git a/src/librustc_typeck/check/upvar.rs b/src/librustc_typeck/check/upvar.rs index 030491b521..aa221c33b5 100644 --- a/src/librustc_typeck/check/upvar.rs +++ b/src/librustc_typeck/check/upvar.rs @@ -120,7 +120,8 @@ impl<'a, 'gcx, 'tcx> SeedBorrowKind<'a, 'gcx, 'tcx> { self.fcx.tcx.with_freevars(expr.id, |freevars| { for freevar in freevars { - let var_node_id = freevar.def.var_id(); + let def_id = freevar.def.def_id(); + let var_node_id = self.fcx.tcx.map.as_local_node_id(def_id).unwrap(); let upvar_id = ty::UpvarId { var_id: var_node_id, closure_expr_id: expr.id }; debug!("seed upvar_id {:?}", upvar_id); @@ -236,31 +237,30 @@ impl<'a, 'gcx, 'tcx> AdjustBorrowKind<'a, 'gcx, 'tcx> { // implemented. let tcx = self.fcx.tcx; tcx.with_freevars(closure_id, |freevars| { - freevars.iter() - .map(|freevar| { - let freevar_node_id = freevar.def.var_id(); - let freevar_ty = self.fcx.node_ty(freevar_node_id); - let upvar_id = ty::UpvarId { - var_id: freevar_node_id, - closure_expr_id: closure_id - }; - let capture = self.fcx.upvar_capture(upvar_id).unwrap(); - - debug!("freevar_node_id={:?} freevar_ty={:?} capture={:?}", - freevar_node_id, freevar_ty, capture); - - match capture { - ty::UpvarCapture::ByValue => freevar_ty, - ty::UpvarCapture::ByRef(borrow) => - tcx.mk_ref(tcx.mk_region(borrow.region), - ty::TypeAndMut { - ty: freevar_ty, - mutbl: borrow.kind.to_mutbl_lossy(), - }), - } - }) - .collect() - }) + freevars.iter().map(|freevar| { + let def_id = freevar.def.def_id(); + let var_id = tcx.map.as_local_node_id(def_id).unwrap(); + let freevar_ty = self.fcx.node_ty(var_id); + let upvar_id = ty::UpvarId { + var_id: var_id, + closure_expr_id: closure_id + }; + let capture = self.fcx.upvar_capture(upvar_id).unwrap(); + + debug!("var_id={:?} freevar_ty={:?} capture={:?}", + var_id, freevar_ty, capture); + + match capture { + ty::UpvarCapture::ByValue => freevar_ty, + ty::UpvarCapture::ByRef(borrow) => + tcx.mk_ref(borrow.region, + ty::TypeAndMut { + ty: freevar_ty, + mutbl: borrow.kind.to_mutbl_lossy(), + }), + } + }).collect() + }) } fn adjust_upvar_borrow_kind_for_consume(&mut self, @@ -283,8 +283,8 @@ impl<'a, 'gcx, 'tcx> AdjustBorrowKind<'a, 'gcx, 'tcx> { debug!("adjust_upvar_borrow_kind_for_consume: guarantor={:?}", guarantor); match guarantor.cat { - Categorization::Deref(_, _, mc::BorrowedPtr(..)) | - Categorization::Deref(_, _, mc::Implicit(..)) => { + Categorization::Deref(.., mc::BorrowedPtr(..)) | + Categorization::Deref(.., mc::Implicit(..)) => { match cmt.note { mc::NoteUpvarRef(upvar_id) => { debug!("adjust_upvar_borrow_kind_for_consume: \ @@ -344,7 +344,7 @@ impl<'a, 'gcx, 'tcx> AdjustBorrowKind<'a, 'gcx, 'tcx> { } } - Categorization::Deref(_, _, mc::UnsafePtr(..)) | + Categorization::Deref(.., mc::UnsafePtr(..)) | Categorization::StaticItem | Categorization::Rvalue(_) | Categorization::Local(_) | @@ -376,7 +376,7 @@ impl<'a, 'gcx, 'tcx> AdjustBorrowKind<'a, 'gcx, 'tcx> { } } - Categorization::Deref(_, _, mc::UnsafePtr(..)) | + Categorization::Deref(.., mc::UnsafePtr(..)) | Categorization::StaticItem | Categorization::Rvalue(_) | Categorization::Local(_) | @@ -536,7 +536,7 @@ impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for AdjustBorrowKind<'a, 'gcx, 'tcx> { borrow_id: ast::NodeId, _borrow_span: Span, cmt: 
mc::cmt<'tcx>, - _loan_region: ty::Region, + _loan_region: &'tcx ty::Region, bk: ty::BorrowKind, _loan_cause: euv::LoanCause) { diff --git a/src/librustc_typeck/check/wfcheck.rs b/src/librustc_typeck/check/wfcheck.rs index 4bb9f4fd33..bc5cb68995 100644 --- a/src/librustc_typeck/check/wfcheck.rs +++ b/src/librustc_typeck/check/wfcheck.rs @@ -14,13 +14,11 @@ use CrateCtxt; use hir::def_id::DefId; use middle::region::{CodeExtent}; use rustc::infer::TypeOrigin; -use rustc::ty::subst::{self, TypeSpace, FnSpace, ParamSpace, SelfSpace}; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt}; +use rustc::util::nodemap::{FnvHashSet, FnvHashMap}; -use std::collections::HashSet; use syntax::ast; -use syntax::parse::token::keywords; use syntax_pos::Span; use errors::DiagnosticBuilder; @@ -114,7 +112,7 @@ impl<'ccx, 'gcx> CheckTypeWellFormedVisitor<'ccx, 'gcx> { ref trait_ref, ref self_ty, _) => { self.check_impl(item, self_ty, trait_ref); } - hir::ItemImpl(_, hir::ImplPolarity::Negative, _, Some(_), _, _) => { + hir::ItemImpl(_, hir::ImplPolarity::Negative, _, Some(_), ..) => { // FIXME(#27579) what amount of WF checking do we need for neg impls? let trait_ref = ccx.tcx.impl_trait_ref(ccx.tcx.map.local_def_id(item.id)).unwrap(); @@ -128,7 +126,7 @@ impl<'ccx, 'gcx> CheckTypeWellFormedVisitor<'ccx, 'gcx> { } } } - hir::ItemFn(_, _, _, _, _, ref body) => { + hir::ItemFn(.., ref body) => { self.check_item_fn(item, body); } hir::ItemStatic(..) => { @@ -138,20 +136,27 @@ impl<'ccx, 'gcx> CheckTypeWellFormedVisitor<'ccx, 'gcx> { self.check_item_type(item); } hir::ItemStruct(ref struct_def, ref ast_generics) => { - self.check_type_defn(item, |fcx| { + self.check_type_defn(item, false, |fcx| { + vec![fcx.struct_variant(struct_def)] + }); + + self.check_variances_for_type_defn(item, ast_generics); + } + hir::ItemUnion(ref struct_def, ref ast_generics) => { + self.check_type_defn(item, true, |fcx| { vec![fcx.struct_variant(struct_def)] }); self.check_variances_for_type_defn(item, ast_generics); } hir::ItemEnum(ref enum_def, ref ast_generics) => { - self.check_type_defn(item, |fcx| { + self.check_type_defn(item, false, |fcx| { fcx.enum_variants(enum_def) }); self.check_variances_for_type_defn(item, ast_generics); } - hir::ItemTrait(_, _, _, ref items) => { + hir::ItemTrait(.., ref items) => { self.check_trait(item, items); } _ => {} @@ -218,24 +223,22 @@ impl<'ccx, 'gcx> CheckTypeWellFormedVisitor<'ccx, 'gcx> { } /// In a type definition, we check that to ensure that the types of the fields are well-formed. - fn check_type_defn(&mut self, item: &hir::Item, mut lookup_fields: F) where - F: for<'fcx, 'tcx> FnMut(&FnCtxt<'fcx, 'gcx, 'tcx>) - -> Vec> + fn check_type_defn(&mut self, item: &hir::Item, all_sized: bool, mut lookup_fields: F) + where F: for<'fcx, 'tcx> FnMut(&FnCtxt<'fcx, 'gcx, 'tcx>) -> Vec> { self.for_item(item).with_fcx(|fcx, this| { let variants = lookup_fields(fcx); for variant in &variants { // For DST, all intermediate types must be sized. - if let Some((_, fields)) = variant.fields.split_last() { - for field in fields { - fcx.register_builtin_bound( - field.ty, - ty::BoundSized, - traits::ObligationCause::new(field.span, - fcx.body_id, - traits::FieldSized)); - } + let unsized_len = if all_sized || variant.fields.is_empty() { 0 } else { 1 }; + for field in &variant.fields[..variant.fields.len() - unsized_len] { + fcx.register_builtin_bound( + field.ty, + ty::BoundSized, + traits::ObligationCause::new(field.span, + fcx.body_id, + traits::FieldSized)); } // All field types must be well-formed. 
@@ -253,6 +256,76 @@ impl<'ccx, 'gcx> CheckTypeWellFormedVisitor<'ccx, 'gcx> { }); } + fn check_auto_trait(&mut self, + trait_def_id: DefId, + items: &[hir::TraitItem], + span: Span) + { + // We want to ensure: + // + // 1) that there are no items contained within + // the trait defintion + // + // 2) that the definition doesn't violate the no-super trait rule + // for auto traits. + // + // 3) that the trait definition does not have any type parameters + + let predicates = self.tcx().lookup_predicates(trait_def_id); + + // We must exclude the Self : Trait predicate contained by all + // traits. + let has_predicates = + predicates.predicates.iter().any(|predicate| { + match predicate { + &ty::Predicate::Trait(ref poly_trait_ref) => { + let self_ty = poly_trait_ref.0.self_ty(); + !(self_ty.is_self() && poly_trait_ref.def_id() == trait_def_id) + }, + _ => true, + } + }); + + let trait_def = self.tcx().lookup_trait_def(trait_def_id); + + let has_ty_params = + trait_def.generics + .types + .len() > 1; + + // We use an if-else here, since the generics will also trigger + // an extraneous error message when we find predicates like + // `T : Sized` for a trait like: `trait Magic`. + // + // We also put the check on the number of items here, + // as it seems confusing to report an error about + // extraneous predicates created by things like + // an associated type inside the trait. + let mut err = None; + if !items.is_empty() { + error_380(self.ccx, span); + } else if has_ty_params { + err = Some(struct_span_err!(self.tcx().sess, span, E0567, + "traits with auto impls (`e.g. impl \ + Trait for ..`) can not have type parameters")); + } else if has_predicates { + err = Some(struct_span_err!(self.tcx().sess, span, E0568, + "traits with auto impls (`e.g. impl \ + Trait for ..`) cannot have predicates")); + } + + // Finally if either of the above conditions apply we should add a note + // indicating that this error is the result of a recent soundness fix. 
+ match err { + None => {}, + Some(mut e) => { + e.note("the new auto trait rules are the result of a \ + recent soundness fix; see #29859 for more details"); + e.emit(); + } + } + } + fn check_trait(&mut self, item: &hir::Item, items: &[hir::TraitItem]) @@ -260,9 +333,7 @@ impl<'ccx, 'gcx> CheckTypeWellFormedVisitor<'ccx, 'gcx> { let trait_def_id = self.tcx().map.local_def_id(item.id); if self.tcx().trait_has_default_impl(trait_def_id) { - if !items.is_empty() { - error_380(self.ccx, item.span); - } + self.check_auto_trait(trait_def_id, items, item.span); } self.for_item(item).with_fcx(|fcx, this| { @@ -283,7 +354,7 @@ impl<'ccx, 'gcx> CheckTypeWellFormedVisitor<'ccx, 'gcx> { let type_scheme = fcx.tcx.lookup_item_type(fcx.tcx.map.local_def_id(item.id)); let item_ty = fcx.instantiate_type_scheme(item.span, free_substs, &type_scheme.ty); let bare_fn_ty = match item_ty.sty { - ty::TyFnDef(_, _, ref bare_fn_ty) => bare_fn_ty, + ty::TyFnDef(.., ref bare_fn_ty) => bare_fn_ty, _ => { span_bug!(item.span, "Fn item without fn type"); } @@ -418,7 +489,7 @@ impl<'ccx, 'gcx> CheckTypeWellFormedVisitor<'ccx, 'gcx> { ty::ExplicitSelfCategory::Static => return, ty::ExplicitSelfCategory::ByValue => self_ty, ty::ExplicitSelfCategory::ByReference(region, mutability) => { - fcx.tcx.mk_ref(fcx.tcx.mk_region(region), ty::TypeAndMut { + fcx.tcx.mk_ref(region, ty::TypeAndMut { ty: self_ty, mutbl: mutability }) @@ -455,70 +526,36 @@ impl<'ccx, 'gcx> CheckTypeWellFormedVisitor<'ccx, 'gcx> { let item_def_id = self.tcx().map.local_def_id(item.id); let ty_predicates = self.tcx().lookup_predicates(item_def_id); + assert_eq!(ty_predicates.parent, None); let variances = self.tcx().item_variances(item_def_id); - let mut constrained_parameters: HashSet<_> = - variances.types - .iter_enumerated() - .filter(|&(_, _, &variance)| variance != ty::Bivariant) - .map(|(space, index, _)| self.param_ty(ast_generics, space, index)) - .map(|p| Parameter::Type(p)) + let mut constrained_parameters: FnvHashSet<_> = + variances.iter().enumerate() + .filter(|&(_, &variance)| variance != ty::Bivariant) + .map(|(index, _)| Parameter(index as u32)) .collect(); identify_constrained_type_params(ty_predicates.predicates.as_slice(), None, &mut constrained_parameters); - for (space, index, _) in variances.types.iter_enumerated() { - let param_ty = self.param_ty(ast_generics, space, index); - if constrained_parameters.contains(&Parameter::Type(param_ty)) { - continue; - } - let span = self.ty_param_span(ast_generics, item, space, index); - self.report_bivariance(span, param_ty.name); - } - - for (space, index, &variance) in variances.regions.iter_enumerated() { - if variance != ty::Bivariant { + for (index, _) in variances.iter().enumerate() { + if constrained_parameters.contains(&Parameter(index as u32)) { continue; } - assert_eq!(space, TypeSpace); - let span = ast_generics.lifetimes[index].lifetime.span; - let name = ast_generics.lifetimes[index].lifetime.name; + let (span, name) = if index < ast_generics.lifetimes.len() { + (ast_generics.lifetimes[index].lifetime.span, + ast_generics.lifetimes[index].lifetime.name) + } else { + let index = index - ast_generics.lifetimes.len(); + (ast_generics.ty_params[index].span, + ast_generics.ty_params[index].name) + }; self.report_bivariance(span, name); } } - fn param_ty(&self, - ast_generics: &hir::Generics, - space: ParamSpace, - index: usize) - -> ty::ParamTy - { - let name = match space { - TypeSpace => ast_generics.ty_params[index].name, - SelfSpace => keywords::SelfType.name(), - FnSpace => 
bug!("Fn space occupied?"), - }; - - ty::ParamTy { space: space, idx: index as u32, name: name } - } - - fn ty_param_span(&self, - ast_generics: &hir::Generics, - item: &hir::Item, - space: ParamSpace, - index: usize) - -> Span - { - match space { - TypeSpace => ast_generics.ty_params[index].span, - SelfSpace => item.span, - FnSpace => span_bug!(item.span, "Fn space occupied?"), - } - } - fn report_bivariance(&self, span: Span, param_name: ast::Name) @@ -542,12 +579,27 @@ impl<'ccx, 'gcx> CheckTypeWellFormedVisitor<'ccx, 'gcx> { } fn reject_shadowing_type_parameters(tcx: TyCtxt, span: Span, generics: &ty::Generics) { - let impl_params = generics.types.get_slice(subst::TypeSpace).iter() - .map(|tp| tp.name).collect::>(); + let parent = tcx.lookup_generics(generics.parent.unwrap()); + let impl_params: FnvHashMap<_, _> = parent.types + .iter() + .map(|tp| (tp.name, tp.def_id)) + .collect(); + + for method_param in &generics.types { + if impl_params.contains_key(&method_param.name) { + // Tighten up the span to focus on only the shadowing type + let shadow_node_id = tcx.map.as_local_node_id(method_param.def_id).unwrap(); + let type_span = match tcx.map.opt_span(shadow_node_id) { + Some(osp) => osp, + None => span + }; - for method_param in generics.types.get_slice(subst::FnSpace) { - if impl_params.contains(&method_param.name) { - error_194(tcx, span, method_param.name); + // The expectation here is that the original trait declaration is + // local so it should be okay to just unwrap everything. + let trait_def_id = impl_params.get(&method_param.name).unwrap(); + let trait_node_id = tcx.map.as_local_node_id(*trait_def_id).unwrap(); + let trait_decl_span = tcx.map.opt_span(trait_node_id).unwrap(); + error_194(tcx, type_span, trait_decl_span, method_param.name); } } } @@ -621,7 +673,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Trait impl: take implied bounds from all types that // appear in the trait reference. let trait_ref = self.instantiate_type_scheme(span, free_substs, trait_ref); - trait_ref.substs.types.as_slice().to_vec() + trait_ref.substs.types().collect() } None => { @@ -642,18 +694,23 @@ fn error_192(ccx: &CrateCtxt, span: Span) { fn error_380(ccx: &CrateCtxt, span: Span) { span_err!(ccx.tcx.sess, span, E0380, - "traits with default impls (`e.g. unsafe impl \ + "traits with default impls (`e.g. 
impl \ Trait for ..`) must have no methods or associated items") } fn error_392<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, span: Span, param_name: ast::Name) -> DiagnosticBuilder<'tcx> { - struct_span_err!(ccx.tcx.sess, span, E0392, - "parameter `{}` is never used", param_name) + let mut err = struct_span_err!(ccx.tcx.sess, span, E0392, + "parameter `{}` is never used", param_name); + err.span_label(span, &format!("unused type parameter")); + err } -fn error_194(tcx: TyCtxt, span: Span, name: ast::Name) { - span_err!(tcx.sess, span, E0194, +fn error_194(tcx: TyCtxt, span: Span, trait_decl_span: Span, name: ast::Name) { + struct_span_err!(tcx.sess, span, E0194, "type parameter `{}` shadows another type parameter of the same name", - name); + name) + .span_label(span, &format!("shadows another type parameter")) + .span_label(trait_decl_span, &format!("first `{}` declared here", name)) + .emit(); } diff --git a/src/librustc_typeck/check/writeback.rs b/src/librustc_typeck/check/writeback.rs index 5f8861f309..0b70d904c2 100644 --- a/src/librustc_typeck/check/writeback.rs +++ b/src/librustc_typeck/check/writeback.rs @@ -18,7 +18,6 @@ use hir::def_id::DefId; use rustc::ty::{self, Ty, TyCtxt, MethodCall, MethodCallee}; use rustc::ty::adjustment; use rustc::ty::fold::{TypeFolder,TypeFoldable}; -use rustc::ty::subst::ParamSpace; use rustc::infer::{InferCtxt, FixupError}; use rustc::util::nodemap::DefIdMap; use write_substs_to_tcx; @@ -68,7 +67,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { wbcx.visit_closures(); wbcx.visit_liberated_fn_sigs(); wbcx.visit_fru_field_types(); - wbcx.visit_anon_types(); + wbcx.visit_anon_types(item_id); wbcx.visit_deferred_obligations(item_id); } } @@ -88,7 +87,7 @@ struct WritebackCx<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> { // early-bound versions of them, visible from the // outside of the function. This is needed by, and // only populated if there are any `impl Trait`. - free_to_bound_regions: DefIdMap + free_to_bound_regions: DefIdMap<&'gcx ty::Region> } impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { @@ -103,23 +102,26 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { return wbcx; } + let gcx = fcx.tcx.global_tcx(); let free_substs = fcx.parameter_environment.free_substs; - for &space in &ParamSpace::all() { - for (i, r) in free_substs.regions.get_slice(space).iter().enumerate() { - match *r { - ty::ReFree(ty::FreeRegion { - bound_region: ty::BoundRegion::BrNamed(def_id, name, _), .. - }) => { - let bound_region = ty::ReEarlyBound(ty::EarlyBoundRegion { - space: space, - index: i as u32, - name: name, - }); - wbcx.free_to_bound_regions.insert(def_id, bound_region); - } - _ => { - bug!("{:?} is not a free region for an early-bound lifetime", r); - } + for (i, k) in free_substs.params().iter().enumerate() { + let r = if let Some(r) = k.as_region() { + r + } else { + continue; + }; + match *r { + ty::ReFree(ty::FreeRegion { + bound_region: ty::BoundRegion::BrNamed(def_id, name, _), .. + }) => { + let bound_region = gcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion { + index: i as u32, + name: name, + })); + wbcx.free_to_bound_regions.insert(def_id, bound_region); + } + _ => { + bug!("{:?} is not a free region for an early-bound lifetime", r); } } } @@ -199,7 +201,7 @@ impl<'cx, 'gcx, 'tcx, 'v> Visitor<'v> for WritebackCx<'cx, 'gcx, 'tcx> { self.visit_method_map_entry(ResolvingExpr(e.span), MethodCall::expr(e.id)); - if let hir::ExprClosure(_, ref decl, _, _) = e.node { + if let hir::ExprClosure(_, ref decl, ..) 
= e.node { for input in &decl.inputs { self.visit_node_id(ResolvingExpr(e.span), input.id); } @@ -300,11 +302,13 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { } } - fn visit_anon_types(&self) { + fn visit_anon_types(&self, item_id: ast::NodeId) { if self.fcx.writeback_errors.get() { return } + let item_def_id = self.fcx.tcx.map.local_def_id(item_id); + let gcx = self.tcx().global_tcx(); for (&def_id, &concrete_ty) in self.fcx.anon_types.borrow().iter() { let reason = ResolvingAnonTy(def_id); @@ -313,13 +317,14 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { // Convert the type from the function into a type valid outside // the function, by replacing free regions with early-bound ones. let outside_ty = gcx.fold_regions(&inside_ty, &mut false, |r, _| { - match r { + match *r { // 'static is valid everywhere. - ty::ReStatic => ty::ReStatic, + ty::ReStatic | + ty::ReEmpty => gcx.mk_region(*r), // Free regions that come from early-bound regions are valid. ty::ReFree(ty::FreeRegion { - bound_region: ty::BoundRegion::BrNamed(def_id, _, _), .. + bound_region: ty::BoundRegion::BrNamed(def_id, ..), .. }) if self.free_to_bound_regions.contains_key(&def_id) => { self.free_to_bound_regions[&def_id] } @@ -333,11 +338,10 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { span_err!(self.tcx().sess, span, E0564, "only named lifetimes are allowed in `impl Trait`, \ but `{}` was found in the type `{}`", r, inside_ty); - ty::ReStatic + gcx.mk_region(ty::ReStatic) } ty::ReVar(_) | - ty::ReEmpty | ty::ReErased => { let span = reason.span(self.tcx()); span_bug!(span, "invalid region in impl Trait: {:?}", r); @@ -345,9 +349,9 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { } }); - gcx.tcache.borrow_mut().insert(def_id, ty::TypeScheme { + gcx.register_item_type(def_id, ty::TypeScheme { ty: outside_ty, - generics: ty::Generics::empty() + generics: gcx.lookup_generics(item_def_id) }); } } @@ -628,12 +632,12 @@ impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for Resolver<'cx, 'gcx, 'tcx> { } } - fn fold_region(&mut self, r: ty::Region) -> ty::Region { + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { match self.infcx.fully_resolve(&r) { Ok(r) => r, Err(e) => { self.report_error(e); - ty::ReStatic + self.tcx.mk_region(ty::ReStatic) } } } diff --git a/src/librustc_typeck/check_unused.rs b/src/librustc_typeck/check_unused.rs index 2ee0927f3c..f66f15b238 100644 --- a/src/librustc_typeck/check_unused.rs +++ b/src/librustc_typeck/check_unused.rs @@ -49,7 +49,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for UnusedTraitImportVisitor<'a, 'tcx> { } hir::ViewPathList(_, ref path_list) => { for path_item in path_list { - self.check_import(path_item.node.id(), path_item.span); + self.check_import(path_item.node.id, path_item.span); } } } diff --git a/src/librustc_typeck/coherence/mod.rs b/src/librustc_typeck/coherence/mod.rs index 9b26e95f7f..3b4c98fc71 100644 --- a/src/librustc_typeck/coherence/mod.rs +++ b/src/librustc_typeck/coherence/mod.rs @@ -17,14 +17,13 @@ use hir::def_id::DefId; use middle::lang_items::UnsizeTraitLangItem; -use rustc::ty::subst::{self, Subst}; +use rustc::ty::subst::Subst; use rustc::ty::{self, TyCtxt, TypeFoldable}; use rustc::traits::{self, Reveal}; -use rustc::ty::{ImplOrTraitItemId, ConstTraitItemId}; -use rustc::ty::{MethodTraitItemId, TypeTraitItemId, ParameterEnvironment}; -use rustc::ty::{Ty, TyBool, TyChar, TyEnum, TyError}; +use rustc::ty::{ParameterEnvironment}; +use rustc::ty::{Ty, TyBool, TyChar, TyError}; use rustc::ty::{TyParam, TyRawPtr}; -use 
rustc::ty::{TyRef, TyStruct, TyTrait, TyNever, TyTuple}; +use rustc::ty::{TyRef, TyAdt, TyTrait, TyNever, TyTuple}; use rustc::ty::{TyStr, TyArray, TySlice, TyFloat, TyInfer, TyInt}; use rustc::ty::{TyUint, TyClosure, TyBox, TyFnDef, TyFnPtr}; use rustc::ty::{TyProjection, TyAnon}; @@ -32,16 +31,15 @@ use rustc::ty::util::CopyImplementationError; use middle::free_region::FreeRegionMap; use CrateCtxt; use rustc::infer::{self, InferCtxt, TypeOrigin}; -use std::cell::RefCell; -use std::rc::Rc; use syntax_pos::Span; -use util::nodemap::{DefIdMap, FnvHashMap}; use rustc::dep_graph::DepNode; use rustc::hir::map as hir_map; use rustc::hir::intravisit; use rustc::hir::{Item, ItemImpl}; use rustc::hir; +use std::rc::Rc; + mod orphan; mod overlap; mod unsafety; @@ -49,7 +47,6 @@ mod unsafety; struct CoherenceChecker<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { crate_context: &'a CrateCtxt<'a, 'gcx>, inference_context: InferCtxt<'a, 'gcx, 'tcx>, - inherent_impls: RefCell>>>>, } struct CoherenceCheckVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { @@ -69,13 +66,12 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { // Returns the def ID of the base type, if there is one. fn get_base_type_def_id(&self, span: Span, ty: Ty<'tcx>) -> Option { match ty.sty { - TyEnum(def, _) | - TyStruct(def, _) => { + TyAdt(def, _) => { Some(def.did) } TyTrait(ref t) => { - Some(t.principal_def_id()) + Some(t.principal.def_id()) } TyBox(_) => { @@ -85,7 +81,7 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) | TyStr | TyArray(..) | TySlice(..) | TyFnDef(..) | TyFnPtr(_) | TyTuple(..) | TyParam(..) | TyError | TyNever | - TyRawPtr(_) | TyRef(_, _) | TyProjection(..) => { + TyRawPtr(_) | TyRef(..) | TyProjection(..) => { None } @@ -108,15 +104,6 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { DepNode::CoherenceCheckImpl, &mut CoherenceCheckVisitor { cc: self }); - // Copy over the inherent impls we gathered up during the walk into - // the tcx. - let mut tcx_inherent_impls = - self.crate_context.tcx.inherent_impls.borrow_mut(); - for (k, v) in self.inherent_impls.borrow().iter() { - tcx_inherent_impls.insert((*k).clone(), - Rc::new((*v.borrow()).clone())); - } - // Populate the table of destructors. It might seem a bit strange to // do this here, but it's actually the most convenient place, since // the coherence tables contain the trait -> type mappings. @@ -170,18 +157,12 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { } } - tcx.impl_items.borrow_mut().insert(impl_did, impl_items); + tcx.impl_or_trait_item_def_ids.borrow_mut().insert(impl_did, Rc::new(impl_items)); } fn add_inherent_impl(&self, base_def_id: DefId, impl_def_id: DefId) { - if let Some(implementation_list) = self.inherent_impls.borrow().get(&base_def_id) { - implementation_list.borrow_mut().push(impl_def_id); - return; - } - - self.inherent_impls.borrow_mut().insert( - base_def_id, - Rc::new(RefCell::new(vec!(impl_def_id)))); + let tcx = self.crate_context.tcx; + tcx.inherent_impls.borrow_mut().push(base_def_id, impl_def_id); } fn add_trait_impl(&self, impl_trait_ref: ty::TraitRef<'gcx>, impl_def_id: DefId) { @@ -192,22 +173,11 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { } // Converts an implementation in the AST to a vector of items. 
- fn create_impl_from_item(&self, item: &Item) -> Vec<ImplOrTraitItemId> { + fn create_impl_from_item(&self, item: &Item) -> Vec<DefId> { match item.node { - ItemImpl(_, _, _, _, _, ref impl_items) => { + ItemImpl(.., ref impl_items) => { impl_items.iter().map(|impl_item| { - let impl_def_id = self.crate_context.tcx.map.local_def_id(impl_item.id); - match impl_item.node { - hir::ImplItemKind::Const(..) => { - ConstTraitItemId(impl_def_id) - } - hir::ImplItemKind::Method(..) => { - MethodTraitItemId(impl_def_id) - } - hir::ImplItemKind::Type(_) => { - TypeTraitItemId(impl_def_id) - } - } + self.crate_context.tcx.map.local_def_id(impl_item.id) }).collect() } _ => { @@ -228,7 +198,7 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { tcx.populate_implementations_for_trait_if_necessary(drop_trait); let drop_trait = tcx.lookup_trait_def(drop_trait); - let impl_items = tcx.impl_items.borrow(); + let impl_items = tcx.impl_or_trait_item_def_ids.borrow(); drop_trait.for_each_impl(tcx, |impl_did| { let items = impl_items.get(&impl_did).unwrap(); @@ -240,9 +210,8 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { let self_type = tcx.lookup_item_type(impl_did); match self_type.ty.sty { - ty::TyEnum(type_def, _) | - ty::TyStruct(type_def, _) => { - type_def.set_destructor(method_def_id.def_id()); + ty::TyAdt(type_def, _) => { + type_def.set_destructor(method_def_id); } _ => { // Destructors only work on nominal types. @@ -250,7 +219,7 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { match tcx.map.find(impl_node_id) { Some(hir_map::NodeItem(item)) => { let span = match item.node { - ItemImpl(_, _, _, _, ref ty, _) => { + ItemImpl(.., ref ty, _) => { ty.span }, _ => item.span @@ -322,7 +291,7 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { } Err(CopyImplementationError::InfrigingVariant(name)) => { let item = tcx.map.expect_item(impl_node_id); - let span = if let ItemImpl(_, _, _, Some(ref tr), _, _) = item.node { + let span = if let ItemImpl(.., Some(ref tr), _, _) = item.node { tr.path.span } else { span @@ -336,7 +305,7 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { } Err(CopyImplementationError::NotAnAdt) => { let item = tcx.map.expect_item(impl_node_id); - let span = if let ItemImpl(_, _, _, _, ref ty, _) = item.node { + let span = if let ItemImpl(.., ref ty, _) = item.node { ty.span } else { span @@ -348,9 +317,11 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { .emit(); } Err(CopyImplementationError::HasDestructor) => { - span_err!(tcx.sess, span, E0184, + struct_span_err!(tcx.sess, span, E0184, "the trait `Copy` may not be implemented for this type; \ - the type has a destructor"); + the type has a destructor") + .span_label(span, &format!("Copy not allowed on types with destructors")) + .emit(); } } }); @@ -386,7 +357,7 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { let source = tcx.lookup_item_type(impl_did).ty; let trait_ref = self.crate_context.tcx.impl_trait_ref(impl_did).unwrap(); - let target = *trait_ref.substs.types.get(subst::TypeSpace, 0); + let target = trait_ref.substs.type_at(1); debug!("check_implementations_of_coerce_unsized: {:?} -> {:?} (bound)", source, target); @@ -413,7 +384,7 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { (&ty::TyBox(a), &ty::TyBox(b)) => (a, b, unsize_trait, None), (&ty::TyRef(r_a, mt_a), &ty::TyRef(r_b, mt_b)) => { - infcx.sub_regions(infer::RelateObjectBound(span), *r_b, *r_a); + infcx.sub_regions(infer::RelateObjectBound(span), r_b, r_a); check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ref(r_b, ty)) } @@
-422,7 +393,8 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty)) } - (&ty::TyStruct(def_a, substs_a), &ty::TyStruct(def_b, substs_b)) => { + (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) + if def_a.is_struct() && def_b.is_struct() => { if def_a != def_b { let source_path = tcx.item_path_str(def_a.did); let target_path = tcx.item_path_str(def_b.did); @@ -458,13 +430,25 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { being coerced, none found"); return; } else if diff_fields.len() > 1 { - span_err!(tcx.sess, span, E0375, - "the trait `CoerceUnsized` may only be implemented \ - for a coercion between structures with one field \ - being coerced, but {} fields need coercions: {}", - diff_fields.len(), diff_fields.iter().map(|&(i, a, b)| { - format!("{} ({} to {})", fields[i].name, a, b) - }).collect::>().join(", ")); + let item = tcx.map.expect_item(impl_node_id); + let span = if let ItemImpl(.., Some(ref t), _, _) = item.node { + t.path.span + } else { + tcx.map.span(impl_node_id) + }; + + let mut err = struct_span_err!(tcx.sess, span, E0375, + "implementing the trait `CoerceUnsized` \ + requires multiple coercions"); + err.note("`CoerceUnsized` may only be implemented for \ + a coercion between structures with one field being coerced"); + err.note(&format!("currently, {} fields need coercions: {}", + diff_fields.len(), + diff_fields.iter().map(|&(i, a, b)| { + format!("{} ({} to {})", fields[i].name, a, b) + }).collect::>().join(", ") )); + err.span_label(span, &format!("requires multiple coercions")); + err.emit(); return; } @@ -486,7 +470,7 @@ impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { // Register an obligation for `A: Trait`. let cause = traits::ObligationCause::misc(span, impl_node_id); let predicate = tcx.predicate_for_trait_def(cause, trait_def_id, 0, - source, vec![target]); + source, &[target]); fulfill_cx.register_predicate_obligation(&infcx, predicate); // Check that all transitive obligations are satisfied. @@ -540,7 +524,6 @@ pub fn check_coherence(ccx: &CrateCtxt) { CoherenceChecker { crate_context: ccx, inference_context: infcx, - inherent_impls: RefCell::new(FnvHashMap()), }.check(); }); unsafety::check(ccx.tcx); diff --git a/src/librustc_typeck/coherence/orphan.rs b/src/librustc_typeck/coherence/orphan.rs index e426f0cb64..70342a0cd2 100644 --- a/src/librustc_typeck/coherence/orphan.rs +++ b/src/librustc_typeck/coherence/orphan.rs @@ -11,8 +11,7 @@ //! Orphan checker: every impl either implements a trait defined in this //! crate or pertains to a type defined in this crate. -use middle::cstore::LOCAL_CRATE; -use hir::def_id::DefId; +use hir::def_id::{DefId, LOCAL_CRATE}; use rustc::traits; use rustc::ty::{self, TyCtxt}; use syntax::ast; @@ -37,7 +36,7 @@ impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> { "cannot define inherent `impl` for a type outside of the \ crate where the type is defined") .span_label(item.span, &format!("impl for type defined outside of crate.")) - .span_note(item.span, &format!("define and implement a trait or new type instead")) + .note("define and implement a trait or new type instead") .emit(); } } @@ -68,19 +67,18 @@ impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> { fn check_item(&self, item: &hir::Item) { let def_id = self.tcx.map.local_def_id(item.id); match item.node { - hir::ItemImpl(_, _, _, None, ref ty, _) => { + hir::ItemImpl(.., None, ref ty, _) => { // For inherent impls, self type must be a nominal type // defined in this crate. 
debug!("coherence2::orphan check: inherent impl {}", self.tcx.map.node_to_string(item.id)); let self_ty = self.tcx.lookup_item_type(def_id).ty; match self_ty.sty { - ty::TyEnum(def, _) | - ty::TyStruct(def, _) => { + ty::TyAdt(def, _) => { self.check_def_id(item, def.did); } ty::TyTrait(ref data) => { - self.check_def_id(item, data.principal_def_id()); + self.check_def_id(item, data.principal.def_id()); } ty::TyBox(..) => { match self.tcx.lang_items.require_owned_box() { @@ -221,7 +219,7 @@ impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> { } } } - hir::ItemImpl(_, _, _, Some(_), _, _) => { + hir::ItemImpl(.., Some(_), _, _) => { // "Trait" impl debug!("coherence2::orphan check: trait impl {}", self.tcx.map.node_to_string(item.id)); @@ -293,12 +291,9 @@ impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> { { let self_ty = trait_ref.self_ty(); let opt_self_def_id = match self_ty.sty { - ty::TyStruct(self_def, _) | ty::TyEnum(self_def, _) => - Some(self_def.did), - ty::TyBox(..) => - self.tcx.lang_items.owned_box(), - _ => - None + ty::TyAdt(self_def, _) => Some(self_def.did), + ty::TyBox(..) => self.tcx.lang_items.owned_box(), + _ => None, }; let msg = match opt_self_def_id { @@ -335,8 +330,10 @@ impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> { // Disallow *all* explicit impls of `Sized` and `Unsize` for now. if Some(trait_def_id) == self.tcx.lang_items.sized_trait() { - span_err!(self.tcx.sess, item.span, E0322, - "explicit impls for the `Sized` trait are not permitted"); + struct_span_err!(self.tcx.sess, item.span, E0322, + "explicit impls for the `Sized` trait are not permitted") + .span_label(item.span, &format!("impl of 'Sized' not allowed")) + .emit(); return; } if Some(trait_def_id) == self.tcx.lang_items.unsize_trait() { @@ -345,15 +342,19 @@ impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> { return; } } - hir::ItemDefaultImpl(..) => { + hir::ItemDefaultImpl(_, ref item_trait_ref) => { // "Trait" impl debug!("coherence2::orphan check: default trait impl {}", self.tcx.map.node_to_string(item.id)); let trait_ref = self.tcx.impl_trait_ref(def_id).unwrap(); if trait_ref.def_id.krate != LOCAL_CRATE { - span_err!(self.tcx.sess, item.span, E0318, + struct_span_err!(self.tcx.sess, item_trait_ref.path.span, E0318, "cannot create default implementations for traits outside the \ - crate they're defined in; define a new trait instead"); + crate they're defined in; define a new trait instead") + .span_label(item_trait_ref.path.span, + &format!("`{}` trait not defined in this crate", + item_trait_ref.path)) + .emit(); return; } } diff --git a/src/librustc_typeck/coherence/overlap.rs b/src/librustc_typeck/coherence/overlap.rs index 46a9ef8d5b..c42b8f8840 100644 --- a/src/librustc_typeck/coherence/overlap.rs +++ b/src/librustc_typeck/coherence/overlap.rs @@ -44,29 +44,29 @@ impl<'cx, 'tcx> OverlapChecker<'cx, 'tcx> { enum Namespace { Type, Value } fn name_and_namespace<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - item: &ty::ImplOrTraitItemId) + def_id: DefId) -> (ast::Name, Namespace) { - let name = tcx.impl_or_trait_item(item.def_id()).name(); - (name, match *item { - ty::TypeTraitItemId(..) => Namespace::Type, - ty::ConstTraitItemId(..) => Namespace::Value, - ty::MethodTraitItemId(..) => Namespace::Value, + let item = tcx.impl_or_trait_item(def_id); + (item.name(), match item { + ty::TypeTraitItem(..) => Namespace::Type, + ty::ConstTraitItem(..) => Namespace::Value, + ty::MethodTraitItem(..) 
=> Namespace::Value, }) } - let impl_items = self.tcx.impl_items.borrow(); + let impl_items = self.tcx.impl_or_trait_item_def_ids.borrow(); - for item1 in &impl_items[&impl1] { + for &item1 in &impl_items[&impl1][..] { let (name, namespace) = name_and_namespace(self.tcx, item1); - for item2 in &impl_items[&impl2] { + for &item2 in &impl_items[&impl2][..] { if (name, namespace) == name_and_namespace(self.tcx, item2) { let msg = format!("duplicate definitions with name `{}`", name); - let node_id = self.tcx.map.as_local_node_id(item1.def_id()).unwrap(); + let node_id = self.tcx.map.as_local_node_id(item1).unwrap(); self.tcx.sess.add_lint(lint::builtin::OVERLAPPING_INHERENT_IMPLS, node_id, - self.tcx.span_of_impl(item1.def_id()).unwrap(), + self.tcx.span_of_impl(item1).unwrap(), msg); } } @@ -97,7 +97,7 @@ impl<'cx, 'tcx> OverlapChecker<'cx, 'tcx> { impl<'cx, 'tcx,'v> intravisit::Visitor<'v> for OverlapChecker<'cx, 'tcx> { fn visit_item(&mut self, item: &'v hir::Item) { match item.node { - hir::ItemEnum(..) | hir::ItemStruct(..) => { + hir::ItemEnum(..) | hir::ItemStruct(..) | hir::ItemUnion(..) => { let type_def_id = self.tcx.map.local_def_id(item.id); self.check_for_overlapping_inherent_impls(type_def_id); } @@ -122,7 +122,7 @@ impl<'cx, 'tcx,'v> intravisit::Visitor<'v> for OverlapChecker<'cx, 'tcx> { err.emit(); } } - hir::ItemImpl(_, _, _, Some(_), _, _) => { + hir::ItemImpl(.., Some(_), _, _) => { let impl_def_id = self.tcx.map.local_def_id(item.id); let trait_ref = self.tcx.impl_trait_ref(impl_def_id).unwrap(); let trait_def_id = trait_ref.def_id; @@ -168,14 +168,14 @@ impl<'cx, 'tcx,'v> intravisit::Visitor<'v> for OverlapChecker<'cx, 'tcx> { // This is something like impl Trait1 for Trait2. Illegal // if Trait1 is a supertrait of Trait2 or Trait2 is not object safe. - if !self.tcx.is_object_safe(data.principal_def_id()) { + if !self.tcx.is_object_safe(data.principal.def_id()) { // This is an error, but it will be // reported by wfcheck. Ignore it // here. This is tested by // `coherence-impl-trait-for-trait-object-safe.rs`. } else { let mut supertrait_def_ids = - traits::supertrait_def_ids(self.tcx, data.principal_def_id()); + traits::supertrait_def_ids(self.tcx, data.principal.def_id()); if supertrait_def_ids.any(|d| d == trait_def_id) { span_err!(self.tcx.sess, item.span, E0371, "the object type `{}` automatically \ diff --git a/src/librustc_typeck/coherence/unsafety.rs b/src/librustc_typeck/coherence/unsafety.rs index 53ec72abac..cdf5478e69 100644 --- a/src/librustc_typeck/coherence/unsafety.rs +++ b/src/librustc_typeck/coherence/unsafety.rs @@ -81,7 +81,7 @@ impl<'cx, 'tcx,'v> intravisit::Visitor<'v> for UnsafetyChecker<'cx, 'tcx> { hir::ItemDefaultImpl(unsafety, _) => { self.check_unsafety_coherence(item, unsafety, hir::ImplPolarity::Positive); } - hir::ItemImpl(unsafety, polarity, _, _, _, _) => { + hir::ItemImpl(unsafety, polarity, ..) 
=> { self.check_unsafety_coherence(item, unsafety, polarity); } _ => { } diff --git a/src/librustc_typeck/collect.rs b/src/librustc_typeck/collect.rs index d38065f4f1..e5d4d4a9da 100644 --- a/src/librustc_typeck/collect.rs +++ b/src/librustc_typeck/collect.rs @@ -65,27 +65,25 @@ use middle::lang_items::SizedTraitLangItem; use middle::const_val::ConstVal; use rustc_const_eval::EvalHint::UncheckedExprHint; use rustc_const_eval::{eval_const_expr_partial, report_const_eval_err}; -use rustc::ty::subst::{Substs, FnSpace, ParamSpace, SelfSpace, TypeSpace, VecPerParamSpace}; +use rustc::ty::subst::Substs; use rustc::ty::{ToPredicate, ImplContainer, ImplOrTraitItemContainer, TraitContainer}; -use rustc::ty::{self, ToPolyTraitRef, Ty, TyCtxt, TypeScheme}; +use rustc::ty::{self, AdtKind, ToPolyTraitRef, Ty, TyCtxt, TypeScheme}; use rustc::ty::{VariantKind}; use rustc::ty::util::IntTypeExt; use rscope::*; use rustc::dep_graph::DepNode; use util::common::{ErrorReported, MemoizationMap}; -use util::nodemap::{NodeMap, FnvHashMap}; +use util::nodemap::{NodeMap, FnvHashMap, FnvHashSet}; use {CrateCtxt, write_ty_to_tcx}; use rustc_const_math::ConstInt; use std::cell::RefCell; -use std::collections::HashSet; use std::collections::hash_map::Entry::{Occupied, Vacant}; use std::rc::Rc; use syntax::{abi, ast, attr}; use syntax::parse::token::keywords; -use syntax::ptr::P; use syntax_pos::Span; use rustc::hir::{self, intravisit, map as hir_map, print as pprust}; @@ -120,6 +118,7 @@ struct ItemCtxt<'a,'tcx:'a> { #[derive(Copy, Clone, PartialEq, Eq)] pub enum AstConvRequest { + GetGenerics(DefId), GetItemTypeScheme(DefId), GetTraitDef(DefId), EnsureSuperPredicates(DefId), @@ -187,6 +186,7 @@ impl<'a,'tcx> CrateCtxt<'a,'tcx> { err.span_label(span, &format!("cyclic reference")); match cycle[0] { + AstConvRequest::GetGenerics(def_id) | AstConvRequest::GetItemTypeScheme(def_id) | AstConvRequest::GetTraitDef(def_id) => { err.note( @@ -209,6 +209,7 @@ impl<'a,'tcx> CrateCtxt<'a,'tcx> { for request in &cycle[1..] 
{ match *request { + AstConvRequest::GetGenerics(def_id) | AstConvRequest::GetItemTypeScheme(def_id) | AstConvRequest::GetTraitDef(def_id) => { err.note( @@ -231,6 +232,7 @@ impl<'a,'tcx> CrateCtxt<'a,'tcx> { } match cycle[0] { + AstConvRequest::GetGenerics(def_id) | AstConvRequest::GetItemTypeScheme(def_id) | AstConvRequest::GetTraitDef(def_id) => { err.note( @@ -303,6 +305,14 @@ impl<'a, 'tcx> AstConv<'tcx, 'tcx> for ItemCtxt<'a, 'tcx> { &self.ccx.ast_ty_to_ty_cache } + fn get_generics(&self, span: Span, id: DefId) + -> Result<&'tcx ty::Generics<'tcx>, ErrorReported> + { + self.ccx.cycle_check(span, AstConvRequest::GetGenerics(id), || { + Ok(generics_of_def_id(self.ccx, id)) + }) + } + fn get_item_type_scheme(&self, span: Span, id: DefId) -> Result, ErrorReported> { @@ -351,10 +361,15 @@ impl<'a, 'tcx> AstConv<'tcx, 'tcx> for ItemCtxt<'a, 'tcx> { -> bool { if let Some(trait_id) = self.tcx().map.as_local_node_id(trait_def_id) { - trait_defines_associated_type_named(self.ccx, trait_id, assoc_name) + trait_associated_type_names(self.tcx(), trait_id) + .any(|name| name == assoc_name) } else { - let trait_def = self.tcx().lookup_trait_def(trait_def_id); - trait_def.associated_type_names.contains(&assoc_name) + self.tcx().impl_or_trait_items(trait_def_id).iter().any(|&def_id| { + match self.tcx().impl_or_trait_item(def_id) { + ty::TypeTraitItem(ref item) => item.name == assoc_name, + _ => false + } + }) } } @@ -362,11 +377,7 @@ impl<'a, 'tcx> AstConv<'tcx, 'tcx> for ItemCtxt<'a, 'tcx> { None } - fn ty_infer(&self, - _ty_param_def: Option>, - _substs: Option<&mut Substs<'tcx>>, - _space: Option, - span: Span) -> Ty<'tcx> { + fn ty_infer(&self, span: Span) -> Ty<'tcx> { struct_span_err!( self.tcx().sess, span, @@ -452,35 +463,37 @@ impl<'tcx> GetTypeParameterBounds<'tcx> for () { impl<'tcx> GetTypeParameterBounds<'tcx> for ty::GenericPredicates<'tcx> { fn get_type_parameter_bounds(&self, astconv: &AstConv<'tcx, 'tcx>, - _span: Span, + span: Span, node_id: ast::NodeId) -> Vec> { let def = astconv.tcx().type_parameter_def(node_id); - self.predicates - .iter() - .filter(|predicate| { - match **predicate { - ty::Predicate::Trait(ref data) => { - data.skip_binder().self_ty().is_param(def.space, def.index) - } - ty::Predicate::TypeOutlives(ref data) => { - data.skip_binder().0.is_param(def.space, def.index) - } - ty::Predicate::Rfc1592(..) | - ty::Predicate::Equate(..) | - ty::Predicate::RegionOutlives(..) | - ty::Predicate::WellFormed(..) | - ty::Predicate::ObjectSafe(..) | - ty::Predicate::ClosureKind(..) | - ty::Predicate::Projection(..) => { - false - } + let mut results = self.parent.map_or(vec![], |def_id| { + let parent = astconv.tcx().lookup_predicates(def_id); + parent.get_type_parameter_bounds(astconv, span, node_id) + }); + + results.extend(self.predicates.iter().filter(|predicate| { + match **predicate { + ty::Predicate::Trait(ref data) => { + data.skip_binder().self_ty().is_param(def.index) } - }) - .cloned() - .collect() + ty::Predicate::TypeOutlives(ref data) => { + data.skip_binder().0.is_param(def.index) + } + ty::Predicate::Equate(..) | + ty::Predicate::RegionOutlives(..) | + ty::Predicate::WellFormed(..) | + ty::Predicate::ObjectSafe(..) | + ty::Predicate::ClosureKind(..) | + ty::Predicate::Projection(..) 
=> { + false + } + } + }).cloned()); + + results } } @@ -538,7 +551,7 @@ fn is_param<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let path_res = tcx.expect_resolution(ast_ty.id); match path_res.base_def { Def::SelfTy(Some(def_id), None) | - Def::TyParam(_, _, def_id, _) if path_res.depth == 0 => { + Def::TyParam(def_id) if path_res.depth == 0 => { def_id == tcx.map.local_def_id(param_id) } _ => false @@ -548,7 +561,6 @@ fn is_param<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } - fn convert_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, container: ImplOrTraitItemContainer, name: ast::Name, @@ -556,46 +568,45 @@ fn convert_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, vis: &hir::Visibility, sig: &hir::MethodSig, defaultness: hir::Defaultness, + has_body: bool, untransformed_rcvr_ty: Ty<'tcx>, - rcvr_ty_generics: &ty::Generics<'tcx>, rcvr_ty_predicates: &ty::GenericPredicates<'tcx>) { - let ty_generics = ty_generics_for_fn(ccx, &sig.generics, rcvr_ty_generics); + let def_id = ccx.tcx.map.local_def_id(id); + let ty_generics = generics_of_def_id(ccx, def_id); let ty_generic_predicates = - ty_generic_predicates_for_fn(ccx, &sig.generics, rcvr_ty_predicates); + ty_generic_predicates(ccx, &sig.generics, ty_generics.parent, vec![], false); let (fty, explicit_self_category) = { let anon_scope = match container { - ImplContainer(_) => Some(AnonTypeScope::new(&ty_generics)), + ImplContainer(_) => Some(AnonTypeScope::new(def_id)), TraitContainer(_) => None }; AstConv::ty_of_method(&ccx.icx(&(rcvr_ty_predicates, &sig.generics)), sig, untransformed_rcvr_ty, anon_scope) }; - let def_id = ccx.tcx.map.local_def_id(id); - let substs = mk_item_substs(ccx.tcx, &ty_generics); - - let ty_method = ty::Method::new(name, - ty_generics, - ty_generic_predicates, - fty, - explicit_self_category, - ty::Visibility::from_hir(vis, id, ccx.tcx), - defaultness, - def_id, - container); + let ty_method = ty::Method { + name: name, + generics: ty_generics, + predicates: ty_generic_predicates, + fty: fty, + explicit_self: explicit_self_category, + vis: ty::Visibility::from_hir(vis, id, ccx.tcx), + defaultness: defaultness, + has_body: has_body, + def_id: def_id, + container: container, + }; + let substs = mk_item_substs(&ccx.icx(&(rcvr_ty_predicates, &sig.generics)), + ccx.tcx.map.span(id), def_id); let fty = ccx.tcx.mk_fn_def(def_id, substs, ty_method.fty); debug!("method {} (id {}) has type {:?}", name, id, fty); - ccx.tcx.register_item_type(def_id, TypeScheme { - generics: ty_method.generics.clone(), - ty: fty - }); - ccx.tcx.predicates.borrow_mut().insert(def_id, ty_method.predicates.clone()); - + ccx.tcx.tcache.borrow_mut().insert(def_id, fty); write_ty_to_tcx(ccx, id, fty); + ccx.tcx.predicates.borrow_mut().insert(def_id, ty_method.predicates.clone()); debug!("writing method type: def_id={:?} mty={:?}", def_id, ty_method); @@ -605,7 +616,7 @@ fn convert_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, } fn convert_field<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - struct_generics: &ty::Generics<'tcx>, + struct_generics: &'tcx ty::Generics<'tcx>, struct_predicates: &ty::GenericPredicates<'tcx>, field: &hir::StructField, ty_f: ty::FieldDefMaster<'tcx>) @@ -617,7 +628,7 @@ fn convert_field<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, /* add the field to the tcache */ ccx.tcx.register_item_type(ccx.tcx.map.local_def_id(field.id), ty::TypeScheme { - generics: struct_generics.clone(), + generics: struct_generics, ty: tt }); ccx.tcx.predicates.borrow_mut().insert(ccx.tcx.map.local_def_id(field.id), @@ -633,8 +644,12 @@ fn convert_associated_const<'a, 'tcx>(ccx: 
&CrateCtxt<'a, 'tcx>, ty: ty::Ty<'tcx>, has_value: bool) { + let predicates = ty::GenericPredicates { + parent: Some(container.id()), + predicates: vec![] + }; ccx.tcx.predicates.borrow_mut().insert(ccx.tcx.map.local_def_id(id), - ty::GenericPredicates::empty()); + predicates); write_ty_to_tcx(ccx, id, ty); @@ -659,6 +674,13 @@ fn convert_associated_type<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, defaultness: hir::Defaultness, ty: Option>) { + let predicates = ty::GenericPredicates { + parent: Some(container.id()), + predicates: vec![] + }; + ccx.tcx.predicates.borrow_mut().insert(ccx.tcx.map.local_def_id(id), + predicates); + let associated_type = Rc::new(ty::AssociatedType { name: name, vis: ty::Visibility::from_hir(vis, id, ccx.tcx), @@ -713,8 +735,9 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { } } hir::ItemEnum(ref enum_definition, _) => { - let (scheme, predicates) = convert_typed_item(ccx, it); - write_ty_to_tcx(ccx, it.id, scheme.ty); + let def_id = ccx.tcx.map.local_def_id(it.id); + let scheme = type_scheme_of_def_id(ccx, def_id); + let predicates = predicates_of_item(ccx, it); convert_enum_variant_types(ccx, tcx.lookup_adt_def_master(ccx.tcx.map.local_def_id(it.id)), scheme, @@ -726,14 +749,14 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { AstConv::instantiate_mono_trait_ref(&ccx.icx(&()), &ExplicitRscope, ast_trait_ref, - None); + tcx.mk_self_type()); tcx.record_trait_has_default_impl(trait_ref.def_id); tcx.impl_trait_refs.borrow_mut().insert(ccx.tcx.map.local_def_id(it.id), Some(trait_ref)); } - hir::ItemImpl(_, _, + hir::ItemImpl(.., ref generics, ref opt_trait_ref, ref selfty, @@ -741,8 +764,9 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { // Create generics from the generics specified in the impl head. debug!("convert: ast_generics={:?}", generics); let def_id = ccx.tcx.map.local_def_id(it.id); - let ty_generics = ty_generics_for_impl(ccx, generics); - let mut ty_predicates = ty_generic_predicates_for_type_or_impl(ccx, generics); + let ty_generics = generics_of_def_id(ccx, def_id); + let mut ty_predicates = + ty_generic_predicates(ccx, generics, None, vec![], false); debug!("convert: impl_bounds={:?}", ty_predicates); @@ -750,13 +774,13 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { write_ty_to_tcx(ccx, it.id, selfty); tcx.register_item_type(def_id, - TypeScheme { generics: ty_generics.clone(), + TypeScheme { generics: ty_generics, ty: selfty }); let trait_ref = opt_trait_ref.as_ref().map(|ast_trait_ref| { AstConv::instantiate_mono_trait_ref(&ccx.icx(&ty_predicates), &ExplicitRscope, ast_trait_ref, - Some(selfty)) + selfty) }); tcx.impl_trait_refs.borrow_mut().insert(def_id, trait_ref); @@ -791,11 +815,13 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { } if let hir::ImplItemKind::Const(ref ty, _) = impl_item.node { + let const_def_id = ccx.tcx.map.local_def_id(impl_item.id); + let ty_generics = generics_of_def_id(ccx, const_def_id); let ty = ccx.icx(&ty_predicates) .to_ty(&ExplicitRscope, &ty); - tcx.register_item_type(ccx.tcx.map.local_def_id(impl_item.id), + tcx.register_item_type(const_def_id, TypeScheme { - generics: ty_generics.clone(), + generics: ty_generics, ty: ty, }); // Trait-associated constants are always public. @@ -812,6 +838,9 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { // Convert all the associated types. 
for impl_item in impl_items { if let hir::ImplItemKind::Type(ref ty) = impl_item.node { + let type_def_id = ccx.tcx.map.local_def_id(impl_item.id); + generics_of_def_id(ccx, type_def_id); + if opt_trait_ref.is_none() { span_err!(tcx.sess, impl_item.span, E0202, "associated types are not allowed in inherent impls"); @@ -833,14 +862,14 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { convert_method(ccx, ImplContainer(def_id), impl_item.name, impl_item.id, method_vis, - sig, impl_item.defaultness, selfty, &ty_generics, + sig, impl_item.defaultness, true, selfty, &ty_predicates); } } enforce_impl_lifetimes_are_constrained(ccx, generics, def_id, impl_items); }, - hir::ItemTrait(_, _, _, ref trait_items) => { + hir::ItemTrait(.., ref trait_items) => { let trait_def = trait_def_of_item(ccx, it); let def_id = trait_def.trait_ref.def_id; let _: Result<(), ErrorReported> = // any error is already reported, can ignore @@ -856,11 +885,13 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { // Convert all the associated constants. for trait_item in trait_items { if let hir::ConstTraitItem(ref ty, ref default) = trait_item.node { + let const_def_id = ccx.tcx.map.local_def_id(trait_item.id); + let ty_generics = generics_of_def_id(ccx, const_def_id); let ty = ccx.icx(&trait_predicates) .to_ty(&ExplicitRscope, ty); - tcx.register_item_type(ccx.tcx.map.local_def_id(trait_item.id), + tcx.register_item_type(const_def_id, TypeScheme { - generics: trait_def.generics.clone(), + generics: ty_generics, ty: ty, }); convert_associated_const(ccx, @@ -877,6 +908,9 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { // Convert all the associated types. for trait_item in trait_items { if let hir::TypeTraitItem(_, ref opt_ty) = trait_item.node { + let type_def_id = ccx.tcx.map.local_def_id(trait_item.id); + generics_of_def_id(ccx, type_def_id); + let typ = opt_ty.as_ref().map({ |ty| ccx.icx(&trait_predicates).to_ty(&ExplicitRscope, &ty) }); @@ -893,7 +927,7 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { // Convert all the methods for trait_item in trait_items { - if let hir::MethodTraitItem(ref sig, _) = trait_item.node { + if let hir::MethodTraitItem(ref sig, ref body) = trait_item.node { convert_method(ccx, container, trait_item.name, @@ -901,8 +935,8 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { &hir::Inherited, sig, hir::Defaultness::Default, + body.is_some(), tcx.mk_self_type(), - &trait_def.generics, &trait_predicates); } @@ -910,22 +944,18 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { // Add an entry mapping let trait_item_def_ids = Rc::new(trait_items.iter().map(|trait_item| { - let def_id = ccx.tcx.map.local_def_id(trait_item.id); - match trait_item.node { - hir::ConstTraitItem(..) => ty::ConstTraitItemId(def_id), - hir::MethodTraitItem(..) => ty::MethodTraitItemId(def_id), - hir::TypeTraitItem(..) 
=> ty::TypeTraitItemId(def_id) - } + ccx.tcx.map.local_def_id(trait_item.id) }).collect()); - tcx.trait_item_def_ids.borrow_mut().insert(ccx.tcx.map.local_def_id(it.id), - trait_item_def_ids); + tcx.impl_or_trait_item_def_ids.borrow_mut().insert(ccx.tcx.map.local_def_id(it.id), + trait_item_def_ids); }, - hir::ItemStruct(ref struct_def, _) => { - let (scheme, predicates) = convert_typed_item(ccx, it); - write_ty_to_tcx(ccx, it.id, scheme.ty); + hir::ItemStruct(ref struct_def, _) | + hir::ItemUnion(ref struct_def, _) => { + let def_id = ccx.tcx.map.local_def_id(it.id); + let scheme = type_scheme_of_def_id(ccx, def_id); + let predicates = predicates_of_item(ccx, it); - let it_def_id = ccx.tcx.map.local_def_id(it.id); - let variant = tcx.lookup_adt_def_master(it_def_id).struct_variant(); + let variant = tcx.lookup_adt_def_master(def_id).struct_variant(); for (f, ty_f) in struct_def.fields().iter().zip(variant.fields.iter()) { convert_field(ccx, &scheme.generics, &predicates, f, ty_f) @@ -937,15 +967,14 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { }, hir::ItemTy(_, ref generics) => { ensure_no_ty_param_bounds(ccx, it.span, generics, "type"); - let (scheme, _) = convert_typed_item(ccx, it); - write_ty_to_tcx(ccx, it.id, scheme.ty); + let def_id = ccx.tcx.map.local_def_id(it.id); + type_scheme_of_def_id(ccx, def_id); + predicates_of_item(ccx, it); }, _ => { - // This call populates the type cache with the converted type - // of the item in passing. All we have to do here is to write - // it into the node type table. - let (scheme, _) = convert_typed_item(ccx, it); - write_ty_to_tcx(ccx, it.id, scheme.ty); + let def_id = ccx.tcx.map.local_def_id(it.id); + type_scheme_of_def_id(ccx, def_id); + predicates_of_item(ccx, it); }, } } @@ -956,6 +985,8 @@ fn convert_variant_ctor<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, scheme: ty::TypeScheme<'tcx>, predicates: ty::GenericPredicates<'tcx>) { let tcx = ccx.tcx; + let def_id = tcx.map.local_def_id(ctor_id); + generics_of_def_id(ccx, def_id); let ctor_ty = match variant.kind { VariantKind::Unit | VariantKind::Struct => scheme.ty, VariantKind::Tuple => { @@ -964,8 +995,8 @@ fn convert_variant_ctor<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, .iter() .map(|field| field.unsubst_ty()) .collect(); - let def_id = tcx.map.local_def_id(ctor_id); - let substs = mk_item_substs(tcx, &scheme.generics); + let substs = mk_item_substs(&ccx.icx(&predicates), + ccx.tcx.map.span(ctor_id), def_id); tcx.mk_fn_def(def_id, substs, tcx.mk_bare_fn(ty::BareFnTy { unsafety: hir::Unsafety::Normal, abi: abi::Abi::Rust, @@ -978,12 +1009,8 @@ fn convert_variant_ctor<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, } }; write_ty_to_tcx(ccx, ctor_id, ctor_ty); + tcx.tcache.borrow_mut().insert(def_id, ctor_ty); tcx.predicates.borrow_mut().insert(tcx.map.local_def_id(ctor_id), predicates); - tcx.register_item_type(tcx.map.local_def_id(ctor_id), - TypeScheme { - generics: scheme.generics, - ty: ctor_ty - }); } fn convert_enum_variant_types<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, @@ -1053,7 +1080,7 @@ fn convert_struct_def<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, let ctor_id = if !def.is_struct() { Some(ccx.tcx.map.local_def_id(def.id())) } else { None }; let variants = vec![convert_struct_variant(ccx, ctor_id.unwrap_or(did), it.name, ConstInt::Infer(0), def)]; - let adt = ccx.tcx.intern_adt_def(did, ty::AdtKind::Struct, variants); + let adt = ccx.tcx.intern_adt_def(did, AdtKind::Struct, variants); if let Some(ctor_id) = ctor_id { // Make adt definition available through constructor id as well. 
ccx.tcx.insert_adt_def(ctor_id, adt); @@ -1061,6 +1088,16 @@ fn convert_struct_def<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, adt } +fn convert_union_def<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + it: &hir::Item, + def: &hir::VariantData) + -> ty::AdtDefMaster<'tcx> +{ + let did = ccx.tcx.map.local_def_id(it.id); + let variants = vec![convert_struct_variant(ccx, did, it.name, ConstInt::Infer(0), def)]; + ccx.tcx.intern_adt_def(did, AdtKind::Union, variants) +} + fn evaluate_disr_expr(ccx: &CrateCtxt, repr_ty: attr::IntType, e: &hir::Expr) -> Option { debug!("disr expr, checking {}", pprust::expr_to_string(e)); @@ -1138,7 +1175,7 @@ fn convert_enum_def<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, let did = tcx.map.local_def_id(v.node.data.id()); convert_struct_variant(ccx, did, v.node.name, disr, &v.node.data) }).collect(); - tcx.intern_adt_def(tcx.map.local_def_id(it.id), ty::AdtKind::Enum, variants) + tcx.intern_adt_def(tcx.map.local_def_id(it.id), AdtKind::Enum, variants) } /// Ensures that the super-predicates of the trait with def-id @@ -1185,9 +1222,8 @@ fn ensure_super_predicates_step(ccx: &CrateCtxt, // generic types: let trait_def = trait_def_of_item(ccx, item); let self_predicate = ty::GenericPredicates { - predicates: VecPerParamSpace::new(vec![], - vec![trait_def.trait_ref.to_predicate()], - vec![]) + parent: None, + predicates: vec![trait_def.trait_ref.to_predicate()] }; let scope = &(generics, &self_predicate); @@ -1209,7 +1245,8 @@ fn ensure_super_predicates_step(ccx: &CrateCtxt, // Combine the two lists to form the complete set of superbounds: let superbounds = superbounds1.into_iter().chain(superbounds2).collect(); let superpredicates = ty::GenericPredicates { - predicates: VecPerParamSpace::new(superbounds, vec![], vec![]) + parent: None, + predicates: superbounds }; debug!("superpredicates for trait {:?} = {:?}", tcx.map.local_def_id(item.id), @@ -1242,8 +1279,10 @@ fn trait_def_of_item<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, return def.clone(); } - let (unsafety, generics, items) = match it.node { - hir::ItemTrait(unsafety, ref generics, _, ref items) => (unsafety, generics, items), + let (unsafety, generics) = match it.node { + hir::ItemTrait(unsafety, ref generics, _, _) => { + (unsafety, generics) + } _ => span_bug!(it.span, "trait_def_of_item invoked on non-trait"), }; @@ -1259,83 +1298,36 @@ fn trait_def_of_item<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, err.emit(); } - let substs = ccx.tcx.mk_substs(mk_trait_substs(ccx, generics)); + let ty_generics = generics_of_def_id(ccx, def_id); + let substs = mk_item_substs(&ccx.icx(generics), it.span, def_id); - let ty_generics = ty_generics_for_trait(ccx, it.id, substs, generics); + let def_path_hash = tcx.def_path(def_id).deterministic_hash(tcx); - let associated_type_names: Vec<_> = items.iter().filter_map(|trait_item| { - match trait_item.node { - hir::TypeTraitItem(..) => Some(trait_item.name), - _ => None, - } - }).collect(); - - let trait_ref = ty::TraitRef { - def_id: def_id, - substs: substs, - }; + let trait_ref = ty::TraitRef::new(def_id, substs); + let trait_def = ty::TraitDef::new(unsafety, paren_sugar, ty_generics, trait_ref, + def_path_hash); - let trait_def = ty::TraitDef::new(unsafety, - paren_sugar, - ty_generics, - trait_ref, - associated_type_names); - - return tcx.intern_trait_def(trait_def); - - fn mk_trait_substs<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - generics: &hir::Generics) - -> Substs<'tcx> - { - let tcx = ccx.tcx; - - // Creates a no-op substitution for the trait's type parameters. 
- let regions = - generics.lifetimes - .iter() - .enumerate() - .map(|(i, def)| ty::ReEarlyBound(ty::EarlyBoundRegion { - space: TypeSpace, - index: i as u32, - name: def.lifetime.name - })) - .collect(); - - // Start with the generics in the type parameters... - let types: Vec<_> = - generics.ty_params - .iter() - .enumerate() - .map(|(i, def)| tcx.mk_param(TypeSpace, - i as u32, def.name)) - .collect(); - - // ...and also create the `Self` parameter. - let self_ty = tcx.mk_self_type(); - - Substs::new_trait(types, regions, self_ty) - } + tcx.intern_trait_def(trait_def) } -fn trait_defines_associated_type_named(ccx: &CrateCtxt, - trait_node_id: ast::NodeId, - assoc_name: ast::Name) - -> bool +pub fn trait_associated_type_names<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_node_id: ast::NodeId) + -> impl Iterator + 'a { - let item = match ccx.tcx.map.get(trait_node_id) { + let item = match tcx.map.get(trait_node_id) { hir_map::NodeItem(item) => item, _ => bug!("trait_node_id {} is not an item", trait_node_id) }; let trait_items = match item.node { - hir::ItemTrait(_, _, _, ref trait_items) => trait_items, + hir::ItemTrait(.., ref trait_items) => trait_items, _ => bug!("trait_node_id {} is not a trait", trait_node_id) }; - trait_items.iter().any(|trait_item| { + trait_items.iter().filter_map(|trait_item| { match trait_item.node { - hir::TypeTraitItem(..) => trait_item.name == assoc_name, - _ => false, + hir::TypeTraitItem(..) => Some(trait_item.name), + _ => None, } }) } @@ -1363,23 +1355,23 @@ fn convert_trait_predicates<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, it: &hir::Item) // but to get the full set of predicates on a trait we need to add // in the supertrait bounds and anything declared on the // associated types. - let mut base_predicates = super_predicates; + let mut base_predicates = super_predicates.predicates; // Add in a predicate that `Self:Trait` (where `Trait` is the // current trait). This is needed for builtin bounds. 
let self_predicate = trait_def.trait_ref.to_poly_trait_ref().to_predicate(); - base_predicates.predicates.push(SelfSpace, self_predicate); + base_predicates.push(self_predicate); // add in the explicit where-clauses let mut trait_predicates = - ty_generic_predicates(ccx, TypeSpace, generics, &base_predicates); + ty_generic_predicates(ccx, generics, None, base_predicates, true); let assoc_predicates = predicates_for_associated_types(ccx, generics, &trait_predicates, trait_def.trait_ref, items); - trait_predicates.predicates.extend(TypeSpace, assoc_predicates.into_iter()); + trait_predicates.predicates.extend(assoc_predicates); let prev_predicates = tcx.predicates.borrow_mut().insert(def_id, trait_predicates); assert!(prev_predicates.is_none()); @@ -1416,192 +1408,272 @@ fn convert_trait_predicates<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, it: &hir::Item) } } -fn type_scheme_of_def_id<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - def_id: DefId) - -> ty::TypeScheme<'tcx> -{ - if let Some(node_id) = ccx.tcx.map.as_local_node_id(def_id) { - match ccx.tcx.map.find(node_id) { - Some(hir_map::NodeItem(item)) => { - type_scheme_of_item(ccx, &item) +fn generics_of_def_id<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + def_id: DefId) + -> &'tcx ty::Generics<'tcx> { + let tcx = ccx.tcx; + let node_id = if let Some(id) = tcx.map.as_local_node_id(def_id) { + id + } else { + return tcx.lookup_generics(def_id); + }; + tcx.generics.memoize(def_id, || { + use rustc::hir::map::*; + use rustc::hir::*; + + let node = tcx.map.get(node_id); + let parent_def_id = match node { + NodeImplItem(_) | + NodeTraitItem(_) | + NodeVariant(_) | + NodeStructCtor(_) => { + let parent_id = tcx.map.get_parent(node_id); + Some(tcx.map.local_def_id(parent_id)) } - Some(hir_map::NodeForeignItem(foreign_item)) => { - let abi = ccx.tcx.map.get_foreign_abi(node_id); - type_scheme_of_foreign_item(ccx, &foreign_item, abi) + _ => None + }; + + let mut opt_self = None; + let mut allow_defaults = false; + + let no_generics = hir::Generics::empty(); + let ast_generics = match node { + NodeTraitItem(item) => { + match item.node { + MethodTraitItem(ref sig, _) => &sig.generics, + _ => &no_generics + } } - x => { - bug!("unexpected sort of node in get_item_type_scheme(): {:?}", - x); + + NodeImplItem(item) => { + match item.node { + ImplItemKind::Method(ref sig, _) => &sig.generics, + _ => &no_generics + } + } + + NodeItem(item) => { + match item.node { + ItemFn(.., ref generics, _) | + ItemImpl(_, _, ref generics, ..) => generics, + + ItemTy(_, ref generics) | + ItemEnum(_, ref generics) | + ItemStruct(_, ref generics) | + ItemUnion(_, ref generics) => { + allow_defaults = true; + generics + } + + ItemTrait(_, ref generics, ..) => { + // Add in the self type parameter. + // + // Something of a hack: use the node id for the trait, also as + // the node id for the Self type parameter. + let param_id = item.id; + + let parent = ccx.tcx.map.get_parent(param_id); + + let def = ty::TypeParameterDef { + index: 0, + name: keywords::SelfType.name(), + def_id: tcx.map.local_def_id(param_id), + default_def_id: tcx.map.local_def_id(parent), + default: None, + object_lifetime_default: ty::ObjectLifetimeDefault::BaseDefault, + }; + tcx.ty_param_defs.borrow_mut().insert(param_id, def.clone()); + opt_self = Some(def); + + allow_defaults = true; + generics + } + + _ => &no_generics + } } + + NodeForeignItem(item) => { + match item.node { + ForeignItemStatic(..) 
=> &no_generics, + ForeignItemFn(_, ref generics) => generics + } + } + + _ => &no_generics + }; + + let has_self = opt_self.is_some(); + let mut parent_has_self = false; + let mut own_start = has_self as u32; + let (parent_regions, parent_types) = parent_def_id.map_or((0, 0), |def_id| { + let generics = generics_of_def_id(ccx, def_id); + assert_eq!(generics.parent, None); + assert_eq!(generics.parent_regions, 0); + assert_eq!(generics.parent_types, 0); + assert_eq!(has_self, false); + parent_has_self = generics.has_self; + own_start = generics.count() as u32; + (generics.regions.len() as u32, generics.types.len() as u32) + }); + + let early_lifetimes = early_bound_lifetimes_from_generics(ccx, ast_generics); + let regions = early_lifetimes.iter().enumerate().map(|(i, l)| { + ty::RegionParameterDef { + name: l.lifetime.name, + index: own_start + i as u32, + def_id: tcx.map.local_def_id(l.lifetime.id), + bounds: l.bounds.iter().map(|l| { + ast_region_to_region(tcx, l) + }).collect() + } + }).collect::>(); + + // Now create the real type parameters. + let type_start = own_start + regions.len() as u32; + let types = ast_generics.ty_params.iter().enumerate().map(|(i, p)| { + let i = type_start + i as u32; + get_or_create_type_parameter_def(ccx, ast_generics, i, p, allow_defaults) + }); + let types: Vec<_> = opt_self.into_iter().chain(types).collect(); + + // Debugging aid. + if tcx.has_attr(def_id, "rustc_object_lifetime_default") { + let object_lifetime_default_reprs: String = + types.iter().map(|t| { + match t.object_lifetime_default { + ty::ObjectLifetimeDefault::Specific(r) => r.to_string(), + d => format!("{:?}", d), + } + }).collect::>().join(","); + tcx.sess.span_err(tcx.map.span(node_id), &object_lifetime_default_reprs); } - } else { - ccx.tcx.lookup_item_type(def_id) - } + + tcx.alloc_generics(ty::Generics { + parent: parent_def_id, + parent_regions: parent_regions, + parent_types: parent_types, + regions: regions, + types: types, + has_self: has_self || parent_has_self + }) + }) } -fn type_scheme_of_item<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - item: &hir::Item) - -> ty::TypeScheme<'tcx> -{ - let item_def_id = ccx.tcx.map.local_def_id(item.id); - ccx.tcx.tcache.memoize(item_def_id, || { - // NB. Since the `memoized` function enters a new task, and we - // are giving this task access to the item `item`, we must - // register a read. - assert!(!ccx.tcx.map.is_inlined_def_id(item_def_id)); - ccx.tcx.dep_graph.read(DepNode::Hir(item_def_id)); - compute_type_scheme_of_item(ccx, item) +fn type_of_def_id<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + def_id: DefId) + -> Ty<'tcx> { + let node_id = if let Some(id) = ccx.tcx.map.as_local_node_id(def_id) { + id + } else { + return ccx.tcx.lookup_item_type(def_id).ty; + }; + ccx.tcx.tcache.memoize(def_id, || { + use rustc::hir::map::*; + use rustc::hir::*; + + let ty = match ccx.tcx.map.get(node_id) { + NodeItem(item) => { + match item.node { + ItemStatic(ref t, ..) 
| ItemConst(ref t, _) => { + ccx.icx(&()).to_ty(&StaticRscope::new(&ccx.tcx), &t) + } + ItemFn(ref decl, unsafety, _, abi, ref generics, _) => { + let tofd = AstConv::ty_of_bare_fn(&ccx.icx(generics), unsafety, abi, &decl, + Some(AnonTypeScope::new(def_id))); + let substs = mk_item_substs(&ccx.icx(generics), item.span, def_id); + ccx.tcx.mk_fn_def(def_id, substs, tofd) + } + ItemTy(ref t, ref generics) => { + ccx.icx(generics).to_ty(&ExplicitRscope, &t) + } + ItemEnum(ref ei, ref generics) => { + let def = convert_enum_def(ccx, item, ei); + let substs = mk_item_substs(&ccx.icx(generics), item.span, def_id); + ccx.tcx.mk_adt(def, substs) + } + ItemStruct(ref si, ref generics) => { + let def = convert_struct_def(ccx, item, si); + let substs = mk_item_substs(&ccx.icx(generics), item.span, def_id); + ccx.tcx.mk_adt(def, substs) + } + ItemUnion(ref un, ref generics) => { + let def = convert_union_def(ccx, item, un); + let substs = mk_item_substs(&ccx.icx(generics), item.span, def_id); + ccx.tcx.mk_adt(def, substs) + } + ItemDefaultImpl(..) | + ItemTrait(..) | + ItemImpl(..) | + ItemMod(..) | + ItemForeignMod(..) | + ItemExternCrate(..) | + ItemUse(..) => { + span_bug!( + item.span, + "compute_type_of_item: unexpected item type: {:?}", + item.node); + } + } + } + NodeForeignItem(foreign_item) => { + let abi = ccx.tcx.map.get_foreign_abi(node_id); + + match foreign_item.node { + ForeignItemFn(ref fn_decl, ref generics) => { + compute_type_of_foreign_fn_decl( + ccx, ccx.tcx.map.local_def_id(foreign_item.id), + fn_decl, generics, abi) + } + ForeignItemStatic(ref t, _) => { + ccx.icx(&()).to_ty(&ExplicitRscope, t) + } + } + } + x => { + bug!("unexpected sort of node in type_of_def_id(): {:?}", x); + } + }; + + write_ty_to_tcx(ccx, node_id, ty); + ty }) } -fn compute_type_scheme_of_item<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - it: &hir::Item) - -> ty::TypeScheme<'tcx> -{ - let tcx = ccx.tcx; - match it.node { - hir::ItemStatic(ref t, _, _) | hir::ItemConst(ref t, _) => { - let ty = ccx.icx(&()).to_ty(&ExplicitRscope, &t); - ty::TypeScheme { ty: ty, generics: ty::Generics::empty() } - } - hir::ItemFn(ref decl, unsafety, _, abi, ref generics, _) => { - let ty_generics = ty_generics_for_fn(ccx, generics, &ty::Generics::empty()); - let tofd = AstConv::ty_of_bare_fn(&ccx.icx(generics), unsafety, abi, &decl, - Some(AnonTypeScope::new(&ty_generics))); - let def_id = ccx.tcx.map.local_def_id(it.id); - let substs = mk_item_substs(tcx, &ty_generics); - let ty = tcx.mk_fn_def(def_id, substs, tofd); - ty::TypeScheme { ty: ty, generics: ty_generics } - } - hir::ItemTy(ref t, ref generics) => { - let ty_generics = ty_generics_for_type(ccx, generics); - let ty = ccx.icx(generics).to_ty(&ExplicitRscope, &t); - ty::TypeScheme { ty: ty, generics: ty_generics } - } - hir::ItemEnum(ref ei, ref generics) => { - let def = convert_enum_def(ccx, it, ei); - let ty_generics = ty_generics_for_type(ccx, generics); - let substs = mk_item_substs(tcx, &ty_generics); - let t = tcx.mk_enum(def, substs); - ty::TypeScheme { ty: t, generics: ty_generics } - } - hir::ItemStruct(ref si, ref generics) => { - let def = convert_struct_def(ccx, it, si); - let ty_generics = ty_generics_for_type(ccx, generics); - let substs = mk_item_substs(tcx, &ty_generics); - let t = tcx.mk_struct(def, substs); - ty::TypeScheme { ty: t, generics: ty_generics } - } - hir::ItemDefaultImpl(..) | - hir::ItemTrait(..) | - hir::ItemImpl(..) | - hir::ItemMod(..) | - hir::ItemForeignMod(..) | - hir::ItemExternCrate(..) | - hir::ItemUse(..) 
=> { - span_bug!( - it.span, - "compute_type_scheme_of_item: unexpected item type: {:?}", - it.node); +fn type_scheme_of_def_id<'a, 'tcx>(ccx: &CrateCtxt<'a,'tcx>, + def_id: DefId) + -> ty::TypeScheme<'tcx> { + if def_id.is_local() { + ty::TypeScheme { + generics: generics_of_def_id(ccx, def_id), + ty: type_of_def_id(ccx, def_id) } + } else { + ccx.tcx.lookup_item_type(def_id) } } -fn convert_typed_item<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, +fn predicates_of_item<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, it: &hir::Item) - -> (ty::TypeScheme<'tcx>, ty::GenericPredicates<'tcx>) -{ - let tcx = ccx.tcx; + -> ty::GenericPredicates<'tcx> { + let def_id = ccx.tcx.map.local_def_id(it.id); - let tag = type_scheme_of_item(ccx, it); - let scheme = TypeScheme { generics: tag.generics, ty: tag.ty }; - let predicates = match it.node { - hir::ItemStatic(..) | hir::ItemConst(..) => { - ty::GenericPredicates::empty() - } - hir::ItemFn(_, _, _, _, ref ast_generics, _) => { - ty_generic_predicates_for_fn(ccx, ast_generics, &ty::GenericPredicates::empty()) - } - hir::ItemTy(_, ref generics) => { - ty_generic_predicates_for_type_or_impl(ccx, generics) - } - hir::ItemEnum(_, ref generics) => { - ty_generic_predicates_for_type_or_impl(ccx, generics) - } - hir::ItemStruct(_, ref generics) => { - ty_generic_predicates_for_type_or_impl(ccx, generics) - } - hir::ItemDefaultImpl(..) | - hir::ItemTrait(..) | - hir::ItemExternCrate(..) | - hir::ItemUse(..) | - hir::ItemImpl(..) | - hir::ItemMod(..) | - hir::ItemForeignMod(..) => { - span_bug!( - it.span, - "compute_type_scheme_of_item: unexpected item type: {:?}", - it.node); - } + let no_generics = hir::Generics::empty(); + let generics = match it.node { + hir::ItemFn(.., ref generics, _) | + hir::ItemTy(_, ref generics) | + hir::ItemEnum(_, ref generics) | + hir::ItemStruct(_, ref generics) | + hir::ItemUnion(_, ref generics) => generics, + _ => &no_generics }; - let prev_predicates = tcx.predicates.borrow_mut().insert(ccx.tcx.map.local_def_id(it.id), - predicates.clone()); + let predicates = ty_generic_predicates(ccx, generics, None, vec![], false); + let prev_predicates = ccx.tcx.predicates.borrow_mut().insert(def_id, + predicates.clone()); assert!(prev_predicates.is_none()); - // Debugging aid. - if tcx.has_attr(ccx.tcx.map.local_def_id(it.id), "rustc_object_lifetime_default") { - let object_lifetime_default_reprs: String = - scheme.generics.types.iter() - .map(|t| match t.object_lifetime_default { - ty::ObjectLifetimeDefault::Specific(r) => r.to_string(), - d => format!("{:?}", d), - }) - .collect::>() - .join(","); - - tcx.sess.span_err(it.span, &object_lifetime_default_reprs); - } - - return (scheme, predicates); -} - -fn type_scheme_of_foreign_item<'a, 'tcx>( - ccx: &CrateCtxt<'a, 'tcx>, - item: &hir::ForeignItem, - abi: abi::Abi) - -> ty::TypeScheme<'tcx> -{ - let item_def_id = ccx.tcx.map.local_def_id(item.id); - ccx.tcx.tcache.memoize(item_def_id, || { - // NB. Since the `memoized` function enters a new task, and we - // are giving this task access to the item `item`, we must - // register a read. 
- assert!(!ccx.tcx.map.is_inlined_def_id(item_def_id)); - ccx.tcx.dep_graph.read(DepNode::Hir(item_def_id)); - compute_type_scheme_of_foreign_item(ccx, item, abi) - }) -} - -fn compute_type_scheme_of_foreign_item<'a, 'tcx>( - ccx: &CrateCtxt<'a, 'tcx>, - it: &hir::ForeignItem, - abi: abi::Abi) - -> ty::TypeScheme<'tcx> -{ - match it.node { - hir::ForeignItemFn(ref fn_decl, ref generics) => { - compute_type_scheme_of_foreign_fn_decl( - ccx, ccx.tcx.map.local_def_id(it.id), - fn_decl, generics, abi) - } - hir::ForeignItemStatic(ref t, _) => { - ty::TypeScheme { - generics: ty::Generics::empty(), - ty: AstConv::ast_ty_to_ty(&ccx.icx(&()), &ExplicitRscope, t) - } - } - } + predicates } fn convert_foreign_item<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, @@ -1611,95 +1683,20 @@ fn convert_foreign_item<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, // map, and I regard each time that I use it as a personal and // moral failing, but at the moment it seems like the only // convenient way to extract the ABI. - ndm - let tcx = ccx.tcx; - let abi = tcx.map.get_foreign_abi(it.id); - - let scheme = type_scheme_of_foreign_item(ccx, it, abi); - write_ty_to_tcx(ccx, it.id, scheme.ty); + let def_id = ccx.tcx.map.local_def_id(it.id); + type_scheme_of_def_id(ccx, def_id); - let predicates = match it.node { - hir::ForeignItemFn(_, ref generics) => { - ty_generic_predicates_for_fn(ccx, generics, &ty::GenericPredicates::empty()) - } - hir::ForeignItemStatic(..) => { - ty::GenericPredicates::empty() - } + let no_generics = hir::Generics::empty(); + let generics = match it.node { + hir::ForeignItemFn(_, ref generics) => generics, + hir::ForeignItemStatic(..) => &no_generics }; - let prev_predicates = tcx.predicates.borrow_mut().insert(ccx.tcx.map.local_def_id(it.id), - predicates); + let predicates = ty_generic_predicates(ccx, generics, None, vec![], false); + let prev_predicates = ccx.tcx.predicates.borrow_mut().insert(def_id, predicates); assert!(prev_predicates.is_none()); } -fn ty_generics_for_type<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, generics: &hir::Generics) - -> ty::Generics<'tcx> { - ty_generics(ccx, TypeSpace, generics, &ty::Generics::empty(), true) -} - -fn ty_generics_for_impl<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, generics: &hir::Generics) - -> ty::Generics<'tcx> { - ty_generics(ccx, TypeSpace, generics, &ty::Generics::empty(), false) -} - -fn ty_generic_predicates_for_type_or_impl<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - generics: &hir::Generics) - -> ty::GenericPredicates<'tcx> -{ - ty_generic_predicates(ccx, TypeSpace, generics, &ty::GenericPredicates::empty()) -} - -fn ty_generics_for_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - trait_id: ast::NodeId, - substs: &'tcx Substs<'tcx>, - ast_generics: &hir::Generics) - -> ty::Generics<'tcx> -{ - debug!("ty_generics_for_trait(trait_id={:?}, substs={:?})", - ccx.tcx.map.local_def_id(trait_id), substs); - - let mut generics = ty_generics_for_type(ccx, ast_generics); - - // Add in the self type parameter. - // - // Something of a hack: use the node id for the trait, also as - // the node id for the Self type parameter. 
- let param_id = trait_id; - - let parent = ccx.tcx.map.get_parent(param_id); - - let def = ty::TypeParameterDef { - space: SelfSpace, - index: 0, - name: keywords::SelfType.name(), - def_id: ccx.tcx.map.local_def_id(param_id), - default_def_id: ccx.tcx.map.local_def_id(parent), - default: None, - object_lifetime_default: ty::ObjectLifetimeDefault::BaseDefault, - }; - - ccx.tcx.ty_param_defs.borrow_mut().insert(param_id, def.clone()); - - generics.types.push(SelfSpace, def); - - return generics; -} - -fn ty_generics_for_fn<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - generics: &hir::Generics, - base_generics: &ty::Generics<'tcx>) - -> ty::Generics<'tcx> -{ - ty_generics(ccx, FnSpace, generics, base_generics, false) -} - -fn ty_generic_predicates_for_fn<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - generics: &hir::Generics, - base_predicates: &ty::GenericPredicates<'tcx>) - -> ty::GenericPredicates<'tcx> -{ - ty_generic_predicates(ccx, FnSpace, generics, base_predicates) -} - // Add the Sized bound, unless the type parameter is marked as `?Sized`. fn add_unsized_bound<'gcx: 'tcx, 'tcx>(astconv: &AstConv<'gcx, 'tcx>, bounds: &mut ty::BuiltinBounds, @@ -1765,48 +1762,67 @@ fn early_bound_lifetimes_from_generics<'a, 'tcx, 'hir>( } fn ty_generic_predicates<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - space: ParamSpace, ast_generics: &hir::Generics, - base_predicates: &ty::GenericPredicates<'tcx>) + parent: Option, + super_predicates: Vec>, + has_self: bool) -> ty::GenericPredicates<'tcx> { let tcx = ccx.tcx; - let mut result = base_predicates.clone(); - - // Collect the predicates that were written inline by the user on each - // type parameter (e.g., ``). - for (index, param) in ast_generics.ty_params.iter().enumerate() { - let index = index as u32; - let param_ty = ty::ParamTy::new(space, index, param.name).to_ty(ccx.tcx); - let bounds = compute_bounds(&ccx.icx(&(base_predicates, ast_generics)), - param_ty, - ¶m.bounds, - SizedByDefault::Yes, - None, - param.span); - let predicates = bounds.predicates(ccx.tcx, param_ty); - result.predicates.extend(space, predicates.into_iter()); - } + let parent_count = parent.map_or(0, |def_id| { + let generics = generics_of_def_id(ccx, def_id); + assert_eq!(generics.parent, None); + assert_eq!(generics.parent_regions, 0); + assert_eq!(generics.parent_types, 0); + generics.count() as u32 + }); + let ref base_predicates = match parent { + Some(def_id) => { + assert_eq!(super_predicates, vec![]); + tcx.lookup_predicates(def_id) + } + None => { + ty::GenericPredicates { + parent: None, + predicates: super_predicates.clone() + } + } + }; + let mut predicates = super_predicates; // Collect the region predicates that were declared inline as // well. In the case of parameters declared on a fn or method, we // have to be careful to only iterate over early-bound regions. 
+ let own_start = parent_count + has_self as u32; let early_lifetimes = early_bound_lifetimes_from_generics(ccx, ast_generics); for (index, param) in early_lifetimes.iter().enumerate() { - let index = index as u32; - let region = - ty::ReEarlyBound(ty::EarlyBoundRegion { - space: space, - index: index, - name: param.lifetime.name - }); + let index = own_start + index as u32; + let region = ccx.tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion { + index: index, + name: param.lifetime.name + })); for bound in ¶m.bounds { let bound_region = ast_region_to_region(ccx.tcx, bound); let outlives = ty::Binder(ty::OutlivesPredicate(region, bound_region)); - result.predicates.push(space, outlives.to_predicate()); + predicates.push(outlives.to_predicate()); } } + // Collect the predicates that were written inline by the user on each + // type parameter (e.g., ``). + let type_start = own_start + early_lifetimes.len() as u32; + for (index, param) in ast_generics.ty_params.iter().enumerate() { + let index = type_start + index as u32; + let param_ty = ty::ParamTy::new(index, param.name).to_ty(ccx.tcx); + let bounds = compute_bounds(&ccx.icx(&(base_predicates, ast_generics)), + param_ty, + ¶m.bounds, + SizedByDefault::Yes, + None, + param.span); + predicates.extend(bounds.predicates(ccx.tcx, param_ty)); + } + // Add in the bounds that appear in the where-clause let where_clause = &ast_generics.where_clause; for predicate in &where_clause.predicates { @@ -1822,22 +1838,24 @@ fn ty_generic_predicates<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, let mut projections = Vec::new(); let trait_ref = - conv_poly_trait_ref(&ccx.icx(&(base_predicates, ast_generics)), - ty, - poly_trait_ref, - &mut projections); + AstConv::instantiate_poly_trait_ref(&ccx.icx(&(base_predicates, + ast_generics)), + &ExplicitRscope, + poly_trait_ref, + ty, + &mut projections); - result.predicates.push(space, trait_ref.to_predicate()); + predicates.push(trait_ref.to_predicate()); for projection in &projections { - result.predicates.push(space, projection.to_predicate()); + predicates.push(projection.to_predicate()); } } &hir::TyParamBound::RegionTyParamBound(ref lifetime) => { let region = ast_region_to_region(tcx, lifetime); let pred = ty::Binder(ty::OutlivesPredicate(ty, region)); - result.predicates.push(space, ty::Predicate::TypeOutlives(pred)) + predicates.push(ty::Predicate::TypeOutlives(pred)) } } } @@ -1848,7 +1866,7 @@ fn ty_generic_predicates<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, for bound in ®ion_pred.bounds { let r2 = ast_region_to_region(tcx, bound); let pred = ty::Binder(ty::OutlivesPredicate(r1, r2)); - result.predicates.push(space, ty::Predicate::RegionOutlives(pred)) + predicates.push(ty::Predicate::RegionOutlives(pred)) } } @@ -1861,89 +1879,27 @@ fn ty_generic_predicates<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, } } - return result; -} - -fn ty_generics<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - space: ParamSpace, - ast_generics: &hir::Generics, - base_generics: &ty::Generics<'tcx>, - allow_defaults: bool) - -> ty::Generics<'tcx> -{ - let tcx = ccx.tcx; - let mut result = base_generics.clone(); - - let early_lifetimes = early_bound_lifetimes_from_generics(ccx, ast_generics); - for (i, l) in early_lifetimes.iter().enumerate() { - let bounds = l.bounds.iter() - .map(|l| ast_region_to_region(tcx, l)) - .collect(); - let def = ty::RegionParameterDef { name: l.lifetime.name, - space: space, - index: i as u32, - def_id: ccx.tcx.map.local_def_id(l.lifetime.id), - bounds: bounds }; - result.regions.push(space, def); - } - - 
assert!(result.types.is_empty_in(space)); - - // Now create the real type parameters. - for i in 0..ast_generics.ty_params.len() { - let def = - get_or_create_type_parameter_def(ccx, ast_generics, space, i as u32, allow_defaults); - debug!("ty_generics: def for type param: {:?}, {:?}", def, space); - result.types.push(space, def); + ty::GenericPredicates { + parent: parent, + predicates: predicates } - - result -} - -fn convert_default_type_parameter<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - path: &P, - space: ParamSpace, - index: u32) - -> Ty<'tcx> -{ - let ty = AstConv::ast_ty_to_ty(&ccx.icx(&()), &ExplicitRscope, &path); - - for leaf_ty in ty.walk() { - if let ty::TyParam(p) = leaf_ty.sty { - if p.space == space && p.idx >= index { - struct_span_err!(ccx.tcx.sess, path.span, E0128, - "type parameters with a default cannot use \ - forward declared identifiers") - .span_label(path.span, &format!("defaulted type parameters \ - cannot be forward declared")) - .emit(); - - return ccx.tcx.types.err - } - } - } - - ty } fn get_or_create_type_parameter_def<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, ast_generics: &hir::Generics, - space: ParamSpace, index: u32, + param: &hir::TyParam, allow_defaults: bool) -> ty::TypeParameterDef<'tcx> { - let param = &ast_generics.ty_params[index as usize]; - let tcx = ccx.tcx; match tcx.ty_param_defs.borrow().get(¶m.id) { Some(d) => { return d.clone(); } None => { } } - let default = param.default.as_ref().map( - |def| convert_default_type_parameter(ccx, def, space, index) - ); + let default = + param.default.as_ref().map(|def| ccx.icx(&()).to_ty(&ExplicitRscope, def)); let object_lifetime_default = compute_object_lifetime_default(ccx, param.id, @@ -1963,7 +1919,6 @@ fn get_or_create_type_parameter_def<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, } let def = ty::TypeParameterDef { - space: space, index: index, name: param.name, def_id: ccx.tcx.map.local_def_id(param.id), @@ -1972,8 +1927,14 @@ fn get_or_create_type_parameter_def<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, object_lifetime_default: object_lifetime_default, }; + if def.name == keywords::SelfType.name() { + span_bug!(param.span, "`Self` should not be the name of a regular parameter"); + } + tcx.ty_param_defs.borrow_mut().insert(param.id, def.clone()); + debug!("get_or_create_type_parameter_def: def for type param: {:?}", def); + def } @@ -1987,13 +1948,13 @@ fn compute_object_lifetime_default<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, param_id: ast::NodeId, param_bounds: &[hir::TyParamBound], where_clause: &hir::WhereClause) - -> ty::ObjectLifetimeDefault + -> ty::ObjectLifetimeDefault<'tcx> { let inline_bounds = from_bounds(ccx, param_bounds); let where_bounds = from_predicates(ccx, param_id, &where_clause.predicates); - let all_bounds: HashSet<_> = inline_bounds.into_iter() - .chain(where_bounds) - .collect(); + let all_bounds: FnvHashSet<_> = inline_bounds.into_iter() + .chain(where_bounds) + .collect(); return if all_bounds.len() > 1 { ty::ObjectLifetimeDefault::Ambiguous } else if all_bounds.len() == 0 { @@ -2005,7 +1966,7 @@ fn compute_object_lifetime_default<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, fn from_bounds<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, bounds: &[hir::TyParamBound]) - -> Vec + -> Vec<&'tcx ty::Region> { bounds.iter() .filter_map(|bound| { @@ -2022,7 +1983,7 @@ fn compute_object_lifetime_default<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, fn from_predicates<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, param_id: ast::NodeId, predicates: &[hir::WherePredicate]) - -> Vec + -> Vec<&'tcx ty::Region> { predicates.iter() .flat_map(|predicate| { @@ 
-2076,7 +2037,7 @@ pub fn compute_bounds<'gcx: 'tcx, 'tcx>(astconv: &AstConv<'gcx, 'tcx>, let mut trait_bounds: Vec<_> = trait_bounds.iter().map(|&bound| { astconv.instantiate_poly_trait_ref(&rscope, bound, - Some(param_ty), + param_ty, &mut projection_bounds) }).collect(); @@ -2107,7 +2068,10 @@ fn predicates_from_bound<'tcx>(astconv: &AstConv<'tcx, 'tcx>, match *bound { hir::TraitTyParamBound(ref tr, hir::TraitBoundModifier::None) => { let mut projections = Vec::new(); - let pred = conv_poly_trait_ref(astconv, param_ty, tr, &mut projections); + let pred = astconv.instantiate_poly_trait_ref(&ExplicitRscope, + tr, + param_ty, + &mut projections); projections.into_iter() .map(|p| p.to_predicate()) .chain(Some(pred.to_predicate())) @@ -2124,29 +2088,14 @@ fn predicates_from_bound<'tcx>(astconv: &AstConv<'tcx, 'tcx>, } } -fn conv_poly_trait_ref<'gcx: 'tcx, 'tcx>(astconv: &AstConv<'gcx, 'tcx>, - param_ty: Ty<'tcx>, - trait_ref: &hir::PolyTraitRef, - projections: &mut Vec>) - -> ty::PolyTraitRef<'tcx> -{ - AstConv::instantiate_poly_trait_ref(astconv, - &ExplicitRscope, - trait_ref, - Some(param_ty), - projections) -} - -fn compute_type_scheme_of_foreign_fn_decl<'a, 'tcx>( +fn compute_type_of_foreign_fn_decl<'a, 'tcx>( ccx: &CrateCtxt<'a, 'tcx>, - id: DefId, + def_id: DefId, decl: &hir::FnDecl, ast_generics: &hir::Generics, abi: abi::Abi) - -> ty::TypeScheme<'tcx> + -> Ty<'tcx> { - let ty_generics = ty_generics_for_fn(ccx, ast_generics, &ty::Generics::empty()); - let rb = BindingRscope::new(); let input_tys = decl.inputs .iter() @@ -2182,67 +2131,61 @@ fn compute_type_scheme_of_foreign_fn_decl<'a, 'tcx>( } } - let substs = mk_item_substs(ccx.tcx, &ty_generics); - let t_fn = ccx.tcx.mk_fn_def(id, substs, ccx.tcx.mk_bare_fn(ty::BareFnTy { + let id = ccx.tcx.map.as_local_node_id(def_id).unwrap(); + let substs = mk_item_substs(&ccx.icx(ast_generics), ccx.tcx.map.span(id), def_id); + ccx.tcx.mk_fn_def(def_id, substs, ccx.tcx.mk_bare_fn(ty::BareFnTy { abi: abi, unsafety: hir::Unsafety::Unsafe, sig: ty::Binder(ty::FnSig {inputs: input_tys, output: output, variadic: decl.variadic}), - })); - - ty::TypeScheme { - generics: ty_generics, - ty: t_fn - } + })) } -pub fn mk_item_substs<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, - ty_generics: &ty::Generics) - -> &'tcx Substs<'tcx> -{ - let types = - ty_generics.types.map( - |def| tcx.mk_param_from_def(def)); - - let regions = - ty_generics.regions.map( - |def| def.to_early_bound_region()); +pub fn mk_item_substs<'gcx: 'tcx, 'tcx>(astconv: &AstConv<'gcx, 'tcx>, + span: Span, + def_id: DefId) + -> &'tcx Substs<'tcx> { + let tcx = astconv.tcx(); + // FIXME(eddyb) Do this request from Substs::for_item in librustc. + if let Err(ErrorReported) = astconv.get_generics(span, def_id) { + // No convenient way to recover from a cycle here. Just bail. Sorry! 
+ tcx.sess.abort_if_errors(); + bug!("ErrorReported returned, but no errors reports?") + } - tcx.mk_substs(Substs::new(types, regions)) + Substs::for_item(tcx, def_id, + |def, _| tcx.mk_region(def.to_early_bound_region()), + |def, _| tcx.mk_param_from_def(def)) } /// Checks that all the type parameters on an impl fn enforce_impl_params_are_constrained<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - ast_generics: &hir::Generics, + generics: &hir::Generics, impl_predicates: &mut ty::GenericPredicates<'tcx>, impl_def_id: DefId) { let impl_scheme = ccx.tcx.lookup_item_type(impl_def_id); let impl_trait_ref = ccx.tcx.impl_trait_ref(impl_def_id); - assert!(impl_predicates.predicates.is_empty_in(FnSpace)); - assert!(impl_predicates.predicates.is_empty_in(SelfSpace)); - // The trait reference is an input, so find all type parameters // reachable from there, to start (if this is an inherent impl, // then just examine the self type). - let mut input_parameters: HashSet<_> = + let mut input_parameters: FnvHashSet<_> = ctp::parameters_for(&impl_scheme.ty, false).into_iter().collect(); if let Some(ref trait_ref) = impl_trait_ref { input_parameters.extend(ctp::parameters_for(trait_ref, false)); } - ctp::setup_constraining_predicates(impl_predicates.predicates.get_mut_slice(TypeSpace), + ctp::setup_constraining_predicates(&mut impl_predicates.predicates, impl_trait_ref, &mut input_parameters); - for (index, ty_param) in ast_generics.ty_params.iter().enumerate() { - let param_ty = ty::ParamTy { space: TypeSpace, - idx: index as u32, - name: ty_param.name }; - if !input_parameters.contains(&ctp::Parameter::Type(param_ty)) { - report_unused_parameter(ccx, ty_param.span, "type", ¶m_ty.to_string()); + let ty_generics = generics_of_def_id(ccx, impl_def_id); + for (ty_param, param) in ty_generics.types.iter().zip(&generics.ty_params) { + let param_ty = ty::ParamTy::for_def(ty_param); + if !input_parameters.contains(&ctp::Parameter::from(param_ty)) { + report_unused_parameter(ccx, param.span, "type", ¶m_ty.to_string()); } } } @@ -2257,7 +2200,7 @@ fn enforce_impl_lifetimes_are_constrained<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, let impl_predicates = ccx.tcx.lookup_predicates(impl_def_id); let impl_trait_ref = ccx.tcx.impl_trait_ref(impl_def_id); - let mut input_parameters: HashSet<_> = + let mut input_parameters: FnvHashSet<_> = ctp::parameters_for(&impl_scheme.ty, false).into_iter().collect(); if let Some(ref trait_ref) = impl_trait_ref { input_parameters.extend(ctp::parameters_for(trait_ref, false)); @@ -2265,29 +2208,26 @@ fn enforce_impl_lifetimes_are_constrained<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, ctp::identify_constrained_type_params( &impl_predicates.predicates.as_slice(), impl_trait_ref, &mut input_parameters); - let lifetimes_in_associated_types: HashSet<_> = impl_items.iter() + let lifetimes_in_associated_types: FnvHashSet<_> = impl_items.iter() .map(|item| ccx.tcx.impl_or_trait_item(ccx.tcx.map.local_def_id(item.id))) .filter_map(|item| match item { ty::TypeTraitItem(ref assoc_ty) => assoc_ty.ty, ty::ConstTraitItem(..) | ty::MethodTraitItem(..) 
=> None }) .flat_map(|ty| ctp::parameters_for(&ty, true)) - .filter_map(|p| match p { - ctp::Parameter::Type(_) => None, - ctp::Parameter::Region(r) => Some(r), - }) .collect(); - for (index, lifetime_def) in ast_generics.lifetimes.iter().enumerate() { - let region = ty::EarlyBoundRegion { space: TypeSpace, - index: index as u32, - name: lifetime_def.lifetime.name }; + for (ty_lifetime, lifetime) in impl_scheme.generics.regions.iter() + .zip(&ast_generics.lifetimes) + { + let param = ctp::Parameter::from(ty_lifetime.to_early_bound_region_data()); + if - lifetimes_in_associated_types.contains(®ion) && // (*) - !input_parameters.contains(&ctp::Parameter::Region(region)) + lifetimes_in_associated_types.contains(¶m) && // (*) + !input_parameters.contains(¶m) { - report_unused_parameter(ccx, lifetime_def.lifetime.span, - "lifetime", ®ion.name.to_string()); + report_unused_parameter(ccx, lifetime.lifetime.span, + "lifetime", &lifetime.lifetime.name.to_string()); } } @@ -2321,7 +2261,6 @@ fn report_unused_parameter(ccx: &CrateCtxt, "the {} parameter `{}` is not constrained by the \ impl trait, self type, or predicates", kind, name) - .span_label(span, &format!("unconstrained lifetime parameter")) + .span_label(span, &format!("unconstrained {} parameter", kind)) .emit(); - } diff --git a/src/librustc_typeck/constrained_type_params.rs b/src/librustc_typeck/constrained_type_params.rs index 7d3bd095a3..39f9e4316b 100644 --- a/src/librustc_typeck/constrained_type_params.rs +++ b/src/librustc_typeck/constrained_type_params.rs @@ -10,12 +10,17 @@ use rustc::ty::{self, Ty}; use rustc::ty::fold::{TypeFoldable, TypeVisitor}; -use std::collections::HashSet; +use rustc::util::nodemap::FnvHashSet; #[derive(Clone, PartialEq, Eq, Hash, Debug)] -pub enum Parameter { - Type(ty::ParamTy), - Region(ty::EarlyBoundRegion), +pub struct Parameter(pub u32); + +impl From for Parameter { + fn from(param: ty::ParamTy) -> Self { Parameter(param.idx) } +} + +impl From for Parameter { + fn from(param: ty::EarlyBoundRegion) -> Self { Parameter(param.index) } } /// If `include_projections` is false, returns the list of parameters that are @@ -49,8 +54,8 @@ impl<'tcx> TypeVisitor<'tcx> for ParameterCollector { // projections are not injective return false; } - ty::TyParam(ref d) => { - self.parameters.push(Parameter::Type(d.clone())); + ty::TyParam(data) => { + self.parameters.push(Parameter::from(data)); } _ => {} } @@ -58,10 +63,10 @@ impl<'tcx> TypeVisitor<'tcx> for ParameterCollector { t.super_visit_with(self) } - fn visit_region(&mut self, r: ty::Region) -> bool { - match r { + fn visit_region(&mut self, r: &'tcx ty::Region) -> bool { + match *r { ty::ReEarlyBound(data) => { - self.parameters.push(Parameter::Region(data)); + self.parameters.push(Parameter::from(data)); } _ => {} } @@ -71,7 +76,7 @@ impl<'tcx> TypeVisitor<'tcx> for ParameterCollector { pub fn identify_constrained_type_params<'tcx>(predicates: &[ty::Predicate<'tcx>], impl_trait_ref: Option>, - input_parameters: &mut HashSet) + input_parameters: &mut FnvHashSet) { let mut predicates = predicates.to_owned(); setup_constraining_predicates(&mut predicates, impl_trait_ref, input_parameters); @@ -120,7 +125,7 @@ pub fn identify_constrained_type_params<'tcx>(predicates: &[ty::Predicate<'tcx>] /// think of any. 
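The constrained_type_params change above folds the two-variant Parameter enum into a single flat index, with From impls doing the bookkeeping. A self-contained sketch of that shape, using simplified stand-ins for the rustc-internal ParamTy and EarlyBoundRegion types:

```rust
// Simplified stand-ins for the rustc-internal types; only the index fields matter here.
struct ParamTy { idx: u32 }
struct EarlyBoundRegion { index: u32 }

// One flat index space for both kinds of generic parameter, mirroring the patch.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct Parameter(pub u32);

impl From<ParamTy> for Parameter {
    fn from(param: ParamTy) -> Self { Parameter(param.idx) }
}

impl From<EarlyBoundRegion> for Parameter {
    fn from(param: EarlyBoundRegion) -> Self { Parameter(param.index) }
}

fn main() {
    let ty_param = Parameter::from(ParamTy { idx: 1 });
    let region_param = Parameter::from(EarlyBoundRegion { index: 0 });
    assert_ne!(ty_param, region_param);
    println!("{:?} {:?}", ty_param, region_param);
}
```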
pub fn setup_constraining_predicates<'tcx>(predicates: &mut [ty::Predicate<'tcx>], impl_trait_ref: Option>, - input_parameters: &mut HashSet) + input_parameters: &mut FnvHashSet) { // The canonical way of doing the needed topological sort // would be a DFS, but getting the graph and its ownership @@ -141,13 +146,15 @@ pub fn setup_constraining_predicates<'tcx>(predicates: &mut [ty::Predicate<'tcx> // * ::Item = T // * T: Debug // * U: Iterator + debug!("setup_constraining_predicates: predicates={:?} \ + impl_trait_ref={:?} input_parameters={:?}", + predicates, impl_trait_ref, input_parameters); let mut i = 0; let mut changed = true; while changed { changed = false; for j in i..predicates.len() { - if let ty::Predicate::Projection(ref poly_projection) = predicates[j] { // Note that we can skip binder here because the impl // trait ref never contains any late-bound regions. @@ -181,5 +188,8 @@ pub fn setup_constraining_predicates<'tcx>(predicates: &mut [ty::Predicate<'tcx> i += 1; changed = true; } + debug!("setup_constraining_predicates: predicates={:?} \ + i={} impl_trait_ref={:?} input_parameters={:?}", + predicates, i, impl_trait_ref, input_parameters); } } diff --git a/src/librustc_typeck/diagnostics.rs b/src/librustc_typeck/diagnostics.rs index 337b87ce99..0d6b43b59c 100644 --- a/src/librustc_typeck/diagnostics.rs +++ b/src/librustc_typeck/diagnostics.rs @@ -572,7 +572,7 @@ impl Foo for Bar { // error, expected u16, found i16 fn foo(x: i16) { } - // error, values differ in mutability + // error, types differ in mutability fn bar(&mut self) { } } ``` @@ -3422,13 +3422,6 @@ containing the unsized type is the last and only unsized type field in the struct. "##, -E0379: r##" -Trait methods cannot be declared `const` by design. For more information, see -[RFC 911]. - -[RFC 911]: https://github.com/rust-lang/rfcs/pull/911 -"##, - E0380: r##" Default impls are only allowed for traits with no methods or associated items. For more information see the [opt-in builtin traits RFC](https://github.com/rust @@ -3773,6 +3766,45 @@ extern "platform-intrinsic" { ``` "##, +E0513: r##" +The type of the variable couldn't be found out. + +Erroneous code example: + +```compile_fail,E0513 +use std::mem; + +unsafe { + let size = mem::size_of::(); + mem::transmute_copy::(&8_8); + // error: no type for local variable +} +``` + +To fix this error, please use a constant size instead of `size`. To make +this error more obvious, you could run: + +```compile_fail,E0080 +use std::mem; + +unsafe { + mem::transmute_copy::()]>(&8_8); + // error: constant evaluation error +} +``` + +So now, you can fix your code by setting the size directly: + +``` +use std::mem; + +unsafe { + mem::transmute_copy::(&8_8); + // `u32` is 4 bytes so we replace the `mem::size_of` call with its size +} +``` +"##, + E0516: r##" The `typeof` keyword is currently reserved but unimplemented. Erroneous code example: @@ -4018,7 +4050,7 @@ register_diagnostics! { // E0141, // E0159, // use of trait `{}` as struct constructor // E0163, // merged into E0071 - E0167, +// E0167, // E0168, // E0173, // manual implementations of unboxed closure traits are experimental // E0174, @@ -4053,7 +4085,7 @@ register_diagnostics! 
{ // E0235, // structure constructor specifies a structure of type but // E0236, // no lang item for range syntax // E0237, // no lang item for range syntax - E0238, // parenthesized parameters may only be used with a trait +// E0238, // parenthesized parameters may only be used with a trait // E0239, // `next` method of `Iterator` trait has unexpected type // E0240, // E0241, @@ -4071,7 +4103,6 @@ register_diagnostics! { E0399, // trait items need to be implemented because the associated // type `{}` was overridden E0436, // functional record update requires a struct - E0513, // no type for local variable .. E0521, // redundant default implementations of trait E0533, // `{}` does not name a unit variant, unit struct or a constant E0562, // `impl Trait` not allowed outside of function @@ -4079,4 +4110,6 @@ register_diagnostics! { E0563, // cannot determine a type for this `impl Trait`: {} E0564, // only named lifetimes are allowed in `impl Trait`, // but `{}` was found in the type `{}` + E0567, // auto traits can not have type parameters + E0568, // auto-traits can not have predicates, } diff --git a/src/librustc_typeck/lib.rs b/src/librustc_typeck/lib.rs index 11743ade2d..cb9b162cab 100644 --- a/src/librustc_typeck/lib.rs +++ b/src/librustc_typeck/lib.rs @@ -76,11 +76,13 @@ This API is completely unstable and subject to change. #![feature(box_patterns)] #![feature(box_syntax)] +#![feature(conservative_impl_trait)] +#![feature(dotdot_in_tuple_patterns)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] #![feature(staged_api)] -#![feature(question_mark)] +#![cfg_attr(stage0, feature(question_mark))] #[macro_use] extern crate log; #[macro_use] extern crate syntax; @@ -132,7 +134,7 @@ pub mod coherence; pub mod variance; pub struct TypeAndSubsts<'tcx> { - pub substs: Substs<'tcx>, + pub substs: &'tcx Substs<'tcx>, pub ty: Ty<'tcx>, } @@ -172,7 +174,7 @@ fn write_substs_to_tcx<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, node_id, item_substs); - assert!(!item_substs.substs.types.needs_infer()); + assert!(!item_substs.substs.needs_infer()); ccx.tcx.tables.borrow_mut().item_substs.insert(node_id, item_substs); } @@ -215,11 +217,11 @@ fn check_main_fn_ty(ccx: &CrateCtxt, match tcx.map.find(main_id) { Some(hir_map::NodeItem(it)) => { match it.node { - hir::ItemFn(_, _, _, _, ref generics, _) => { - if let Some(gen_span) = generics.span() { - struct_span_err!(ccx.tcx.sess, gen_span, E0131, + hir::ItemFn(.., ref generics, _) => { + if generics.is_parameterized() { + struct_span_err!(ccx.tcx.sess, generics.span, E0131, "main function is not allowed to have type parameters") - .span_label(gen_span, + .span_label(generics.span, &format!("main cannot have type parameters")) .emit(); return; @@ -231,7 +233,7 @@ fn check_main_fn_ty(ccx: &CrateCtxt, _ => () } let main_def_id = tcx.map.local_def_id(main_id); - let substs = tcx.mk_substs(Substs::empty()); + let substs = Substs::empty(tcx); let se_ty = tcx.mk_fn_def(main_def_id, substs, tcx.mk_bare_fn(ty::BareFnTy { unsafety: hir::Unsafety::Normal, @@ -267,12 +269,11 @@ fn check_start_fn_ty(ccx: &CrateCtxt, match tcx.map.find(start_id) { Some(hir_map::NodeItem(it)) => { match it.node { - hir::ItemFn(_,_,_,_,ref ps,_) + hir::ItemFn(..,ref ps,_) if ps.is_parameterized() => { - let sp = if let Some(sp) = ps.span() { sp } else { start_span }; - struct_span_err!(tcx.sess, sp, E0132, + struct_span_err!(tcx.sess, ps.span, E0132, "start function is not allowed to have type parameters") - .span_label(sp, + .span_label(ps.span, &format!("start 
function cannot have type parameters")) .emit(); return; @@ -284,7 +285,7 @@ fn check_start_fn_ty(ccx: &CrateCtxt, } let start_def_id = ccx.tcx.map.local_def_id(start_id); - let substs = tcx.mk_substs(Substs::empty()); + let substs = Substs::empty(tcx); let se_ty = tcx.mk_fn_def(start_def_id, substs, tcx.mk_bare_fn(ty::BareFnTy { unsafety: hir::Unsafety::Normal, diff --git a/src/librustc_typeck/rscope.rs b/src/librustc_typeck/rscope.rs index 58d1ec9d02..131ecfc6e0 100644 --- a/src/librustc_typeck/rscope.rs +++ b/src/librustc_typeck/rscope.rs @@ -8,10 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. - -use rustc::ty::{self, TyCtxt}; +use rustc::hir::def_id::DefId; +use rustc::ty; use rustc::ty::subst::Substs; +use astconv::AstConv; + use std::cell::Cell; use syntax_pos::Span; @@ -71,33 +73,34 @@ pub trait RegionScope { } #[derive(Copy, Clone)] -pub struct AnonTypeScope<'a> { - generics: &'a ty::Generics<'a> +pub struct AnonTypeScope { + enclosing_item: DefId } -impl<'a, 'b, 'gcx, 'tcx> AnonTypeScope<'a> { - pub fn new(generics: &'a ty::Generics<'a>) -> AnonTypeScope<'a> { +impl<'gcx: 'tcx, 'tcx> AnonTypeScope { + pub fn new(enclosing_item: DefId) -> AnonTypeScope { AnonTypeScope { - generics: generics + enclosing_item: enclosing_item } } - pub fn fresh_substs(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> &'tcx Substs<'tcx> { + pub fn fresh_substs(&self, astconv: &AstConv<'gcx, 'tcx>, span: Span) + -> &'tcx Substs<'tcx> { use collect::mk_item_substs; - mk_item_substs(tcx, self.generics) + mk_item_substs(astconv, span, self.enclosing_item) } } /// A scope wrapper which optionally allows anonymized types. #[derive(Copy, Clone)] -pub struct MaybeWithAnonTypes<'a, R> { +pub struct MaybeWithAnonTypes { base_scope: R, - anon_scope: Option> + anon_scope: Option } -impl<'a, R: RegionScope> MaybeWithAnonTypes<'a, R> { - pub fn new(base_scope: R, anon_scope: Option>) -> Self { +impl MaybeWithAnonTypes { + pub fn new(base_scope: R, anon_scope: Option) -> Self { MaybeWithAnonTypes { base_scope: base_scope, anon_scope: anon_scope @@ -105,7 +108,7 @@ impl<'a, R: RegionScope> MaybeWithAnonTypes<'a, R> { } } -impl<'a, R: RegionScope> RegionScope for MaybeWithAnonTypes<'a, R> { +impl RegionScope for MaybeWithAnonTypes { fn object_lifetime_default(&self, span: Span) -> Option { self.base_scope.object_lifetime_default(span) } @@ -210,6 +213,45 @@ impl RegionScope for ElidableRscope { } } +/// A scope that behaves as an ElidabeRscope with a `'static` default region +/// that should also warn if the `static_in_const` feature is unset. 
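The scope documented above (and defined just below) backs the static_in_const feature gate: inside const and static items an elided reference lifetime defaults to `'static` instead of being rejected. A minimal illustration of the language-level effect, assuming a compiler where the feature is available (it required `#![feature(static_in_const)]` on nightlies of this era and was stabilized later):

```rust
// Before this feature, a const or static holding a reference had to spell
// out the 'static lifetime; with it, the elided lifetime defaults to 'static.
const EXPLICIT: &'static str = "hello";
const ELIDED: &str = "hello"; // equivalent: the elided lifetime is 'static

fn main() {
    assert_eq!(EXPLICIT, ELIDED);
}
```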
+#[derive(Copy, Clone)] +pub struct StaticRscope<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { + tcx: &'a ty::TyCtxt<'a, 'gcx, 'tcx>, +} + +impl<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> StaticRscope<'a, 'gcx, 'tcx> { + /// create a new StaticRscope from a reference to the `TyCtxt` + pub fn new(tcx: &'a ty::TyCtxt<'a, 'gcx, 'tcx>) -> Self { + StaticRscope { tcx: tcx } + } +} + +impl<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> RegionScope for StaticRscope<'a, 'gcx, 'tcx> { + fn anon_regions(&self, + span: Span, + count: usize) + -> Result, Option>> { + if !self.tcx.sess.features.borrow().static_in_const { + self.tcx + .sess + .struct_span_err(span, + "this needs a `'static` lifetime or the \ + `static_in_const` feature, see #35897") + .emit(); + } + Ok(vec![ty::ReStatic; count]) + } + + fn object_lifetime_default(&self, span: Span) -> Option { + Some(self.base_object_lifetime_default(span)) + } + + fn base_object_lifetime_default(&self, _span: Span) -> ty::Region { + ty::ReStatic + } +} + /// A scope in which we generate anonymous, late-bound regions for /// omitted regions. This occurs in function signatures. pub struct BindingRscope { @@ -254,12 +296,12 @@ impl RegionScope for BindingRscope { /// A scope which overrides the default object lifetime but has no other effect. pub struct ObjectLifetimeDefaultRscope<'r> { base_scope: &'r (RegionScope+'r), - default: ty::ObjectLifetimeDefault, + default: ty::ObjectLifetimeDefault<'r>, } impl<'r> ObjectLifetimeDefaultRscope<'r> { pub fn new(base_scope: &'r (RegionScope+'r), - default: ty::ObjectLifetimeDefault) + default: ty::ObjectLifetimeDefault<'r>) -> ObjectLifetimeDefaultRscope<'r> { ObjectLifetimeDefaultRscope { @@ -280,7 +322,7 @@ impl<'r> RegionScope for ObjectLifetimeDefaultRscope<'r> { Some(self.base_object_lifetime_default(span)), ty::ObjectLifetimeDefault::Specific(r) => - Some(r), + Some(*r), } } diff --git a/src/librustc_typeck/variance/constraints.rs b/src/librustc_typeck/variance/constraints.rs index a4faee8f63..c9e93a1a46 100644 --- a/src/librustc_typeck/variance/constraints.rs +++ b/src/librustc_typeck/variance/constraints.rs @@ -16,8 +16,7 @@ use dep_graph::DepTrackingMapConfig; use hir::def_id::DefId; use middle::resolve_lifetime as rl; -use rustc::ty::subst; -use rustc::ty::subst::ParamSpace; +use rustc::ty::subst::Substs; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::maps::ItemVariances; use rustc::hir::map as hir_map; @@ -27,7 +26,6 @@ use rustc::hir::intravisit::Visitor; use super::terms::*; use super::terms::VarianceTerm::*; -use super::terms::ParamKind::*; use super::xform::*; pub struct ConstraintContext<'a, 'tcx: 'a> { @@ -39,7 +37,7 @@ pub struct ConstraintContext<'a, 'tcx: 'a> { invariant: VarianceTermPtr<'a>, bivariant: VarianceTermPtr<'a>, - pub constraints: Vec> , + pub constraints: Vec>, } /// Declares that the variable `decl_id` appears in a location with @@ -51,8 +49,7 @@ pub struct Constraint<'a> { } pub fn add_constraints_from_crate<'a, 'tcx>(terms_cx: TermsContext<'a, 'tcx>) - -> ConstraintContext<'a, 'tcx> -{ + -> ConstraintContext<'a, 'tcx> { let tcx = terms_cx.tcx; let covariant = terms_cx.arena.alloc(ConstantTerm(ty::Covariant)); let contravariant = terms_cx.arena.alloc(ConstantTerm(ty::Contravariant)); @@ -82,7 +79,9 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ConstraintContext<'a, 'tcx> { debug!("visit_item item={}", tcx.map.node_to_string(item.id)); match item.node { - hir::ItemEnum(..) | hir::ItemStruct(..) => { + hir::ItemEnum(..) | + hir::ItemStruct(..) | + hir::ItemUnion(..) 
=> { let scheme = tcx.lookup_item_type(did); // Not entirely obvious: constraints on structs/enums do not @@ -113,8 +112,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ConstraintContext<'a, 'tcx> { hir::ItemForeignMod(..) | hir::ItemTy(..) | hir::ItemImpl(..) | - hir::ItemDefaultImpl(..) => { - } + hir::ItemDefaultImpl(..) => {} } } } @@ -122,7 +120,8 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ConstraintContext<'a, 'tcx> { /// Is `param_id` a lifetime according to `map`? fn is_lifetime(map: &hir_map::Map, param_id: ast::NodeId) -> bool { match map.find(param_id) { - Some(hir_map::NodeLifetime(..)) => true, _ => false + Some(hir_map::NodeLifetime(..)) => true, + _ => false, } } @@ -145,13 +144,12 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { let tcx = self.terms_cx.tcx; assert!(is_lifetime(&tcx.map, param_id)); match tcx.named_region_map.defs.get(¶m_id) { - Some(&rl::DefEarlyBoundRegion(_, _, lifetime_decl_id)) - => lifetime_decl_id, + Some(&rl::DefEarlyBoundRegion(_, lifetime_decl_id)) => lifetime_decl_id, Some(_) => bug!("should not encounter non early-bound cases"), // The lookup should only fail when `param_id` is // itself a lifetime binding: use it as the decl_id. - None => param_id, + None => param_id, } } @@ -165,14 +163,15 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { // parameter (by inspecting parent of its binding declaration // to see if it is introduced by a type or by a fn/impl). - let check_result = |this:&ConstraintContext| -> bool { + let check_result = |this: &ConstraintContext| -> bool { let tcx = this.terms_cx.tcx; let decl_id = this.find_binding_for_lifetime(param_id); // Currently only called on lifetimes; double-checking that. assert!(is_lifetime(&tcx.map, param_id)); let parent_id = tcx.map.get_parent(decl_id); - let parent = tcx.map.find(parent_id).unwrap_or_else( - || bug!("tcx.map missing entry for id: {}", parent_id)); + let parent = tcx.map + .find(parent_id) + .unwrap_or_else(|| bug!("tcx.map missing entry for id: {}", parent_id)); let is_inferred; macro_rules! cannot_happen { () => { { @@ -187,14 +186,15 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { hir::ItemTy(..) | hir::ItemEnum(..) | hir::ItemStruct(..) | - hir::ItemTrait(..) => is_inferred = true, - hir::ItemFn(..) => is_inferred = false, - _ => cannot_happen!(), + hir::ItemUnion(..) | + hir::ItemTrait(..) => is_inferred = true, + hir::ItemFn(..) => is_inferred = false, + _ => cannot_happen!(), } } - hir_map::NodeTraitItem(..) => is_inferred = false, - hir_map::NodeImplItem(..) => is_inferred = false, - _ => cannot_happen!(), + hir_map::NodeTraitItem(..) => is_inferred = false, + hir_map::NodeImplItem(..) => is_inferred = false, + _ => cannot_happen!(), } return is_inferred; @@ -210,8 +210,6 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { fn declared_variance(&self, param_def_id: DefId, item_def_id: DefId, - kind: ParamKind, - space: ParamSpace, index: usize) -> VarianceTermPtr<'a> { assert_eq!(param_def_id.krate, item_def_id.krate); @@ -226,32 +224,25 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { // Parameter on an item defined within another crate: // variance already inferred, just look it up. 
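For orientation while reading the rewritten ConstraintContext: the variances it infers are the usual covariant, contravariant, and invariant classifications of generic parameters. A small standalone refresher on what those classifications permit, illustrative only and not code from the patch:

```rust
// Covariance: `&'a T` is covariant in 'a, so a longer-lived reference can be
// returned where a shorter-lived one is expected.
fn shorten<'short, 'long: 'short>(r: &'long str) -> &'short str {
    r
}

// Contravariance: `fn(T)` is contravariant in its argument, so a function
// accepting any `&'a str` can be used where `fn(&'static str)` is expected.
fn widen_arg<'a>(f: fn(&'a str)) -> fn(&'static str) {
    f
}

fn print_it(s: &str) {
    println!("{}", s);
}

fn main() {
    let owned = String::from("short-lived data");
    println!("{}", shorten(&owned));
    let g = widen_arg(print_it);
    g("a 'static string literal");
}
```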
let variances = self.tcx().item_variances(item_def_id); - let variance = match kind { - TypeParam => *variances.types.get(space, index), - RegionParam => *variances.regions.get(space, index), - }; - self.constant_term(variance) + self.constant_term(variances[index]) } } fn add_constraint(&mut self, InferredIndex(index): InferredIndex, variance: VarianceTermPtr<'a>) { - debug!("add_constraint(index={}, variance={:?})", - index, variance); - self.constraints.push(Constraint { inferred: InferredIndex(index), - variance: variance }); + debug!("add_constraint(index={}, variance={:?})", index, variance); + self.constraints.push(Constraint { + inferred: InferredIndex(index), + variance: variance, + }); } - fn contravariant(&mut self, - variance: VarianceTermPtr<'a>) - -> VarianceTermPtr<'a> { + fn contravariant(&mut self, variance: VarianceTermPtr<'a>) -> VarianceTermPtr<'a> { self.xform(variance, self.contravariant) } - fn invariant(&mut self, - variance: VarianceTermPtr<'a>) - -> VarianceTermPtr<'a> { + fn invariant(&mut self, variance: VarianceTermPtr<'a>) -> VarianceTermPtr<'a> { self.xform(variance, self.invariant) } @@ -264,23 +255,16 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { } } - fn xform(&mut self, - v1: VarianceTermPtr<'a>, - v2: VarianceTermPtr<'a>) - -> VarianceTermPtr<'a> { + fn xform(&mut self, v1: VarianceTermPtr<'a>, v2: VarianceTermPtr<'a>) -> VarianceTermPtr<'a> { match (*v1, *v2) { (_, ConstantTerm(ty::Covariant)) => { // Applying a "covariant" transform is always a no-op v1 } - (ConstantTerm(c1), ConstantTerm(c2)) => { - self.constant_term(c1.xform(c2)) - } + (ConstantTerm(c1), ConstantTerm(c2)) => self.constant_term(c1.xform(c2)), - _ => { - &*self.terms_cx.arena.alloc(TransformTerm(v1, v2)) - } + _ => &*self.terms_cx.arena.alloc(TransformTerm(v1, v2)), } } @@ -299,13 +283,12 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { // README.md for a discussion on dep-graph management. self.tcx().dep_graph.read(ItemVariances::to_dep_node(&trait_ref.def_id)); - self.add_constraints_from_substs( - generics, - trait_ref.def_id, - trait_def.generics.types.as_slice(), - trait_def.generics.regions.as_slice(), - trait_ref.substs, - variance); + self.add_constraints_from_substs(generics, + trait_ref.def_id, + &trait_def.generics.types, + &trait_def.generics.regions, + trait_ref.substs, + variance); } /// Adds constraints appropriate for an instance of `ty` appearing @@ -320,27 +303,28 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { variance); match ty.sty { - ty::TyBool | - ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | - ty::TyFloat(_) | ty::TyStr | ty::TyNever => { - /* leaf type -- noop */ + ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | + ty::TyStr | ty::TyNever => { + // leaf type -- noop } - ty::TyClosure(..) | ty::TyAnon(..) => { + ty::TyClosure(..) | + ty::TyAnon(..) 
=> { bug!("Unexpected closure type in variance computation"); } ty::TyRef(region, ref mt) => { let contra = self.contravariant(variance); - self.add_constraints_from_region(generics, *region, contra); + self.add_constraints_from_region(generics, region, contra); self.add_constraints_from_mt(generics, mt, variance); } - ty::TyBox(typ) | ty::TyArray(typ, _) | ty::TySlice(typ) => { + ty::TyBox(typ) | + ty::TyArray(typ, _) | + ty::TySlice(typ) => { self.add_constraints_from_ty(generics, typ, variance); } - ty::TyRawPtr(ref mt) => { self.add_constraints_from_mt(generics, mt, variance); } @@ -351,8 +335,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { } } - ty::TyEnum(def, substs) | - ty::TyStruct(def, substs) => { + ty::TyAdt(def, substs) => { let item_type = self.tcx().lookup_item_type(def.did); // This edge is actually implied by the call to @@ -360,20 +343,12 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { // README.md for a discussion on dep-graph management. self.tcx().dep_graph.read(ItemVariances::to_dep_node(&def.did)); - // All type parameters on enums and structs should be - // in the TypeSpace. - assert!(item_type.generics.types.is_empty_in(subst::SelfSpace)); - assert!(item_type.generics.types.is_empty_in(subst::FnSpace)); - assert!(item_type.generics.regions.is_empty_in(subst::SelfSpace)); - assert!(item_type.generics.regions.is_empty_in(subst::FnSpace)); - - self.add_constraints_from_substs( - generics, - def.did, - item_type.generics.types.get_slice(subst::TypeSpace), - item_type.generics.regions.get_slice(subst::TypeSpace), - substs, - variance); + self.add_constraints_from_substs(generics, + def.did, + &item_type.generics.types, + &item_type.generics.regions, + substs, + variance); } ty::TyProjection(ref data) => { @@ -385,36 +360,34 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { // README.md for a discussion on dep-graph management. self.tcx().dep_graph.read(ItemVariances::to_dep_node(&trait_ref.def_id)); - self.add_constraints_from_substs( - generics, - trait_ref.def_id, - trait_def.generics.types.as_slice(), - trait_def.generics.regions.as_slice(), - trait_ref.substs, - variance); + self.add_constraints_from_substs(generics, + trait_ref.def_id, + &trait_def.generics.types, + &trait_def.generics.regions, + trait_ref.substs, + variance); } ty::TyTrait(ref data) => { - let poly_trait_ref = - data.principal_trait_ref_with_self_ty(self.tcx(), - self.tcx().types.err); - // The type `Foo` is contravariant w/r/t `'a`: let contra = self.contravariant(variance); - self.add_constraints_from_region(generics, data.bounds.region_bound, contra); + self.add_constraints_from_region(generics, data.region_bound, contra); - // Ignore the SelfSpace, it is erased. 
+ let poly_trait_ref = data.principal.with_self_ty(self.tcx(), self.tcx().types.err); self.add_constraints_from_trait_ref(generics, poly_trait_ref.0, variance); - let projections = data.projection_bounds_with_self_ty(self.tcx(), - self.tcx().types.err); - for projection in &projections { + for projection in &data.projection_bounds { self.add_constraints_from_ty(generics, projection.0.ty, self.invariant); } } ty::TyParam(ref data) => { - let def_id = generics.types.get(data.space, data.idx as usize).def_id; + assert_eq!(generics.parent, None); + let mut i = data.idx as usize; + if !generics.has_self || i > 0 { + i -= generics.regions.len(); + } + let def_id = generics.types[i].def_id; let node_id = self.tcx().map.as_local_node_id(def_id).unwrap(); match self.terms_cx.inferred_map.get(&node_id) { Some(&index) => { @@ -428,7 +401,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { } } - ty::TyFnDef(_, _, &ty::BareFnTy { ref sig, .. }) | + ty::TyFnDef(.., &ty::BareFnTy { ref sig, .. }) | ty::TyFnPtr(&ty::BareFnTy { ref sig, .. }) => { self.add_constraints_from_sig(generics, sig, variance); } @@ -440,7 +413,8 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { ty::TyInfer(..) => { bug!("unexpected type encountered in \ - variance inference: {}", ty); + variance inference: {}", + ty); } } } @@ -452,7 +426,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { def_id: DefId, type_param_defs: &[ty::TypeParameterDef<'tcx>], region_param_defs: &[ty::RegionParameterDef], - substs: &subst::Substs<'tcx>, + substs: &Substs<'tcx>, variance: VarianceTermPtr<'a>) { debug!("add_constraints_from_substs(def_id={:?}, substs={:?}, variance={:?})", def_id, @@ -460,22 +434,19 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { variance); for p in type_param_defs { - let variance_decl = - self.declared_variance(p.def_id, def_id, TypeParam, - p.space, p.index as usize); + let variance_decl = self.declared_variance(p.def_id, def_id, p.index as usize); let variance_i = self.xform(variance, variance_decl); - let substs_ty = *substs.types.get(p.space, p.index as usize); + let substs_ty = substs.type_for_def(p); debug!("add_constraints_from_substs: variance_decl={:?} variance_i={:?}", - variance_decl, variance_i); + variance_decl, + variance_i); self.add_constraints_from_ty(generics, substs_ty, variance_i); } for p in region_param_defs { - let variance_decl = - self.declared_variance(p.def_id, def_id, - RegionParam, p.space, p.index as usize); + let variance_decl = self.declared_variance(p.def_id, def_id, p.index as usize); let variance_i = self.xform(variance, variance_decl); - let substs_r = *substs.regions.get(p.space, p.index as usize); + let substs_r = substs.region_for_def(p); self.add_constraints_from_region(generics, substs_r, variance_i); } } @@ -497,12 +468,13 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { /// context with ambient variance `variance` fn add_constraints_from_region(&mut self, generics: &ty::Generics<'tcx>, - region: ty::Region, + region: &'tcx ty::Region, variance: VarianceTermPtr<'a>) { - match region { + match *region { ty::ReEarlyBound(ref data) => { - let def_id = - generics.regions.get(data.space, data.index as usize).def_id; + assert_eq!(generics.parent, None); + let i = data.index as usize - generics.has_self as usize; + let def_id = generics.regions[i].def_id; let node_id = self.tcx().map.as_local_node_id(def_id).unwrap(); if self.is_to_be_inferred(node_id) { let index = self.inferred_index(node_id); @@ -510,15 +482,19 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { } } - ty::ReStatic => { } + 
ty::ReStatic => {} ty::ReLateBound(..) => { // We do not infer variance for region parameters on // methods or in fn types. } - ty::ReFree(..) | ty::ReScope(..) | ty::ReVar(..) | - ty::ReSkolemized(..) | ty::ReEmpty | ty::ReErased => { + ty::ReFree(..) | + ty::ReScope(..) | + ty::ReVar(..) | + ty::ReSkolemized(..) | + ty::ReEmpty | + ty::ReErased => { // We don't expect to see anything but 'static or bound // regions when visiting member types or method types. bug!("unexpected region encountered in variance \ diff --git a/src/librustc_typeck/variance/mod.rs b/src/librustc_typeck/variance/mod.rs index 13ed6cf764..cd0ab1cbb9 100644 --- a/src/librustc_typeck/variance/mod.rs +++ b/src/librustc_typeck/variance/mod.rs @@ -34,4 +34,3 @@ pub fn infer_variance<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { solve::solve_constraints(constraints_cx); tcx.variance_computed.set(true); } - diff --git a/src/librustc_typeck/variance/solve.rs b/src/librustc_typeck/variance/solve.rs index 0763cfd7e2..a5c53b4c62 100644 --- a/src/librustc_typeck/variance/solve.rs +++ b/src/librustc_typeck/variance/solve.rs @@ -15,36 +15,34 @@ //! optimal solution to the constraints. The final variance for each //! inferred is then written into the `variance_map` in the tcx. -use rustc::ty::subst::VecPerParamSpace; use rustc::ty; use std::rc::Rc; use super::constraints::*; use super::terms::*; use super::terms::VarianceTerm::*; -use super::terms::ParamKind::*; use super::xform::*; struct SolveContext<'a, 'tcx: 'a> { terms_cx: TermsContext<'a, 'tcx>, - constraints: Vec> , + constraints: Vec>, // Maps from an InferredIndex to the inferred value for that variable. - solutions: Vec + solutions: Vec, } pub fn solve_constraints(constraints_cx: ConstraintContext) { let ConstraintContext { terms_cx, constraints, .. 
} = constraints_cx; - let solutions = - terms_cx.inferred_infos.iter() - .map(|ii| ii.initial_variance) - .collect(); + let solutions = terms_cx.inferred_infos + .iter() + .map(|ii| ii.initial_variance) + .collect(); let mut solutions_cx = SolveContext { terms_cx: terms_cx, constraints: constraints, - solutions: solutions + solutions: solutions, }; solutions_cx.solve(); solutions_cx.write(); @@ -70,13 +68,13 @@ impl<'a, 'tcx> SolveContext<'a, 'tcx> { if old_value != new_value { debug!("Updating inferred {} (node {}) \ from {:?} to {:?} due to {:?}", - inferred, - self.terms_cx - .inferred_infos[inferred] - .param_id, - old_value, - new_value, - term); + inferred, + self.terms_cx + .inferred_infos[inferred] + .param_id, + old_value, + new_value, + term); self.solutions[inferred] = new_value; changed = true; @@ -109,49 +107,47 @@ impl<'a, 'tcx> SolveContext<'a, 'tcx> { let num_inferred = self.terms_cx.num_inferred(); while index < num_inferred { let item_id = inferred_infos[index].item_id; - let mut types = VecPerParamSpace::empty(); - let mut regions = VecPerParamSpace::empty(); + + let mut item_variances = vec![]; while index < num_inferred && inferred_infos[index].item_id == item_id { let info = &inferred_infos[index]; let variance = solutions[index]; - debug!("Index {} Info {} / {:?} / {:?} Variance {:?}", - index, info.index, info.kind, info.space, variance); - match info.kind { - TypeParam => { types.push(info.space, variance); } - RegionParam => { regions.push(info.space, variance); } - } + debug!("Index {} Info {} Variance {:?}", + index, + info.index, + variance); + assert_eq!(item_variances.len(), info.index); + item_variances.push(variance); index += 1; } - let item_variances = ty::ItemVariances { - types: types, - regions: regions - }; - debug!("item_id={} item_variances={:?}", - item_id, - item_variances); + debug!("item_id={} item_variances={:?}", item_id, item_variances); let item_def_id = tcx.map.local_def_id(item_id); // For unit testing: check for a special "rustc_variance" // attribute and report an error with various results if found. 
if tcx.has_attr(item_def_id, "rustc_variance") { - span_err!(tcx.sess, tcx.map.span(item_id), E0208, "{:?}", item_variances); + span_err!(tcx.sess, + tcx.map.span(item_id), + E0208, + "{:?}", + item_variances); } - let newly_added = tcx.item_variance_map.borrow_mut() - .insert(item_def_id, Rc::new(item_variances)).is_none(); + let newly_added = tcx.item_variance_map + .borrow_mut() + .insert(item_def_id, Rc::new(item_variances)) + .is_none(); assert!(newly_added); } } fn evaluate(&self, term: VarianceTermPtr<'a>) -> ty::Variance { match *term { - ConstantTerm(v) => { - v - } + ConstantTerm(v) => v, TransformTerm(t1, t2) => { let v1 = self.evaluate(t1); @@ -159,9 +155,7 @@ impl<'a, 'tcx> SolveContext<'a, 'tcx> { v1.xform(v2) } - InferredTerm(InferredIndex(index)) => { - self.solutions[index] - } + InferredTerm(InferredIndex(index)) => self.solutions[index], } } } diff --git a/src/librustc_typeck/variance/terms.rs b/src/librustc_typeck/variance/terms.rs index d9e7e8cbf7..f6732f36e3 100644 --- a/src/librustc_typeck/variance/terms.rs +++ b/src/librustc_typeck/variance/terms.rs @@ -21,7 +21,6 @@ use arena::TypedArena; use dep_graph::DepTrackingMapConfig; -use rustc::ty::subst::{ParamSpace, FnSpace, TypeSpace, SelfSpace, VecPerParamSpace}; use rustc::ty::{self, TyCtxt}; use rustc::ty::maps::ItemVariances; use std::fmt; @@ -32,7 +31,6 @@ use rustc::hir::intravisit::Visitor; use util::nodemap::NodeMap; use self::VarianceTerm::*; -use self::ParamKind::*; pub type VarianceTermPtr<'a> = &'a VarianceTerm<'a>; @@ -51,7 +49,12 @@ impl<'a> fmt::Debug for VarianceTerm<'a> { match *self { ConstantTerm(c1) => write!(f, "{:?}", c1), TransformTerm(v1, v2) => write!(f, "({:?} \u{00D7} {:?})", v1, v2), - InferredTerm(id) => write!(f, "[{}]", { let InferredIndex(i) = id; i }) + InferredTerm(id) => { + write!(f, "[{}]", { + let InferredIndex(i) = id; + i + }) + } } } } @@ -62,7 +65,7 @@ pub struct TermsContext<'a, 'tcx: 'a> { pub tcx: TyCtxt<'a, 'tcx, 'tcx>, pub arena: &'a TypedArena>, - pub empty_variances: Rc, + pub empty_variances: Rc>, // For marker types, UnsafeCell, and other lang items where // variance is hardcoded, records the item-id and the hardcoded @@ -74,19 +77,11 @@ pub struct TermsContext<'a, 'tcx: 'a> { pub inferred_map: NodeMap, // Maps from an InferredIndex to the info for that variable. - pub inferred_infos: Vec> , -} - -#[derive(Copy, Clone, Debug, PartialEq)] -pub enum ParamKind { - TypeParam, - RegionParam, + pub inferred_infos: Vec>, } pub struct InferredInfo<'a> { pub item_id: ast::NodeId, - pub kind: ParamKind, - pub space: ParamSpace, pub index: usize, pub param_id: ast::NodeId, pub term: VarianceTermPtr<'a>, @@ -97,11 +92,9 @@ pub struct InferredInfo<'a> { pub initial_variance: ty::Variance, } -pub fn determine_parameters_to_be_inferred<'a, 'tcx>( - tcx: TyCtxt<'a, 'tcx, 'tcx>, - arena: &'a mut TypedArena>) - -> TermsContext<'a, 'tcx> -{ +pub fn determine_parameters_to_be_inferred<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + arena: &'a mut TypedArena>) + -> TermsContext<'a, 'tcx> { let mut terms_cx = TermsContext { tcx: tcx, arena: arena, @@ -112,20 +105,16 @@ pub fn determine_parameters_to_be_inferred<'a, 'tcx>( // cache and share the variance struct used for items with // no type/region parameters - empty_variances: Rc::new(ty::ItemVariances { - types: VecPerParamSpace::empty(), - regions: VecPerParamSpace::empty() - }) + empty_variances: Rc::new(vec![]), }; // See README.md for a discussion on dep-graph management. 
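The hardcoded lang-item variances listed just below (PhantomData covariant, UnsafeCell invariant) have directly observable consequences in user code. A hedged illustration of the difference, not taken from the patch:

```rust
use std::cell::UnsafeCell;
use std::marker::PhantomData;

// PhantomData<T> is covariant in T, so a 'static marker can be shortened
// to any other lifetime.
fn shrink_marker<'a>(m: PhantomData<&'static str>) -> PhantomData<&'a str> {
    m
}

// UnsafeCell<T> is invariant in T; the analogous shortening is rejected by
// the compiler, which is why it stays commented out here.
// fn shrink_cell<'a>(c: UnsafeCell<&'static str>) -> UnsafeCell<&'a str> {
//     c // error: UnsafeCell is invariant, lifetimes must match exactly
// }

fn main() {
    let _marker: PhantomData<&str> = shrink_marker(PhantomData);
    let cell = UnsafeCell::new(0u8);
    println!("cell holds {}", unsafe { *cell.get() });
}
```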
- tcx.visit_all_items_in_krate(|def_id| ItemVariances::to_dep_node(&def_id), - &mut terms_cx); + tcx.visit_all_items_in_krate(|def_id| ItemVariances::to_dep_node(&def_id), &mut terms_cx); terms_cx } -fn lang_items(tcx: TyCtxt) -> Vec<(ast::NodeId,Vec)> { +fn lang_items(tcx: TyCtxt) -> Vec<(ast::NodeId, Vec)> { let all = vec![ (tcx.lang_items.phantom_data(), vec![ty::Covariant]), (tcx.lang_items.unsafe_cell_type(), vec![ty::Invariant]), @@ -151,15 +140,13 @@ impl<'a, 'tcx> TermsContext<'a, 'tcx> { fn add_inferreds_for_item(&mut self, item_id: ast::NodeId, has_self: bool, - generics: &hir::Generics) - { - /*! - * Add "inferreds" for the generic parameters declared on this - * item. This has a lot of annoying parameters because we are - * trying to drive this from the AST, rather than the - * ty::Generics, so that we can get span info -- but this - * means we must accommodate syntactic distinctions. - */ + generics: &hir::Generics) { + //! Add "inferreds" for the generic parameters declared on this + //! item. This has a lot of annoying parameters because we are + //! trying to drive this from the AST, rather than the + //! ty::Generics, so that we can get span info -- but this + //! means we must accommodate syntactic distinctions. + //! // NB: In the code below for writing the results back into the // tcx, we rely on the fact that all inferreds for a particular @@ -168,16 +155,18 @@ impl<'a, 'tcx> TermsContext<'a, 'tcx> { let inferreds_on_entry = self.num_inferred(); if has_self { - self.add_inferred(item_id, TypeParam, SelfSpace, 0, item_id); + self.add_inferred(item_id, 0, item_id); } for (i, p) in generics.lifetimes.iter().enumerate() { let id = p.lifetime.id; - self.add_inferred(item_id, RegionParam, TypeSpace, i, id); + let i = has_self as usize + i; + self.add_inferred(item_id, i, id); } for (i, p) in generics.ty_params.iter().enumerate() { - self.add_inferred(item_id, TypeParam, TypeSpace, i, p.id); + let i = has_self as usize + generics.lifetimes.len() + i; + self.add_inferred(item_id, i, p.id); } // If this item has no type or lifetime parameters, @@ -189,63 +178,47 @@ impl<'a, 'tcx> TermsContext<'a, 'tcx> { // parameters". 
if self.num_inferred() == inferreds_on_entry { let item_def_id = self.tcx.map.local_def_id(item_id); - let newly_added = - self.tcx.item_variance_map.borrow_mut().insert( - item_def_id, - self.empty_variances.clone()).is_none(); + let newly_added = self.tcx + .item_variance_map + .borrow_mut() + .insert(item_def_id, self.empty_variances.clone()) + .is_none(); assert!(newly_added); } } - fn add_inferred(&mut self, - item_id: ast::NodeId, - kind: ParamKind, - space: ParamSpace, - index: usize, - param_id: ast::NodeId) { + fn add_inferred(&mut self, item_id: ast::NodeId, index: usize, param_id: ast::NodeId) { let inf_index = InferredIndex(self.inferred_infos.len()); let term = self.arena.alloc(InferredTerm(inf_index)); - let initial_variance = self.pick_initial_variance(item_id, space, index); - self.inferred_infos.push(InferredInfo { item_id: item_id, - kind: kind, - space: space, - index: index, - param_id: param_id, - term: term, - initial_variance: initial_variance }); + let initial_variance = self.pick_initial_variance(item_id, index); + self.inferred_infos.push(InferredInfo { + item_id: item_id, + index: index, + param_id: param_id, + term: term, + initial_variance: initial_variance, + }); let newly_added = self.inferred_map.insert(param_id, inf_index).is_none(); assert!(newly_added); debug!("add_inferred(item_path={}, \ item_id={}, \ - kind={:?}, \ - space={:?}, \ index={}, \ param_id={}, \ inf_index={:?}, \ initial_variance={:?})", self.tcx.item_path_str(self.tcx.map.local_def_id(item_id)), - item_id, kind, space, index, param_id, inf_index, + item_id, + index, + param_id, + inf_index, initial_variance); } - fn pick_initial_variance(&self, - item_id: ast::NodeId, - space: ParamSpace, - index: usize) - -> ty::Variance - { - match space { - SelfSpace | FnSpace => { - ty::Bivariant - } - - TypeSpace => { - match self.lang_items.iter().find(|&&(n, _)| n == item_id) { - Some(&(_, ref variances)) => variances[index], - None => ty::Bivariant - } - } + fn pick_initial_variance(&self, item_id: ast::NodeId, index: usize) -> ty::Variance { + match self.lang_items.iter().find(|&&(n, _)| n == item_id) { + Some(&(_, ref variances)) => variances[index], + None => ty::Bivariant, } } @@ -256,14 +229,16 @@ impl<'a, 'tcx> TermsContext<'a, 'tcx> { impl<'a, 'tcx, 'v> Visitor<'v> for TermsContext<'a, 'tcx> { fn visit_item(&mut self, item: &hir::Item) { - debug!("add_inferreds for item {}", self.tcx.map.node_to_string(item.id)); + debug!("add_inferreds for item {}", + self.tcx.map.node_to_string(item.id)); match item.node { hir::ItemEnum(_, ref generics) | - hir::ItemStruct(_, ref generics) => { + hir::ItemStruct(_, ref generics) | + hir::ItemUnion(_, ref generics) => { self.add_inferreds_for_item(item.id, false, generics); } - hir::ItemTrait(_, ref generics, _, _) => { + hir::ItemTrait(_, ref generics, ..) => { // Note: all inputs for traits are ultimately // constrained to be invariant. See `visit_item` in // the impl for `ConstraintContext` in `constraints.rs`. @@ -279,9 +254,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for TermsContext<'a, 'tcx> { hir::ItemFn(..) | hir::ItemMod(..) | hir::ItemForeignMod(..) | - hir::ItemTy(..) => { - } + hir::ItemTy(..) 
=> {} } } } - diff --git a/src/librustc_typeck/variance/xform.rs b/src/librustc_typeck/variance/xform.rs index 02a2ceb360..507734ce35 100644 --- a/src/librustc_typeck/variance/xform.rs +++ b/src/librustc_typeck/variance/xform.rs @@ -47,7 +47,8 @@ pub fn glb(v1: ty::Variance, v2: ty::Variance) -> ty::Variance { // - + // o match (v1, v2) { - (ty::Invariant, _) | (_, ty::Invariant) => ty::Invariant, + (ty::Invariant, _) | + (_, ty::Invariant) => ty::Invariant, (ty::Covariant, ty::Contravariant) => ty::Invariant, (ty::Contravariant, ty::Covariant) => ty::Invariant, @@ -56,6 +57,7 @@ pub fn glb(v1: ty::Variance, v2: ty::Variance) -> ty::Variance { (ty::Contravariant, ty::Contravariant) => ty::Contravariant, - (x, ty::Bivariant) | (ty::Bivariant, x) => x, + (x, ty::Bivariant) | + (ty::Bivariant, x) => x, } } diff --git a/src/librustc_unicode/char.rs b/src/librustc_unicode/char.rs index 81856cb87c..5a0c27d9c6 100644 --- a/src/librustc_unicode/char.rs +++ b/src/librustc_unicode/char.rs @@ -29,6 +29,7 @@ #![stable(feature = "rust1", since = "1.0.0")] use core::char::CharExt as C; +use core::iter::FusedIterator; use core::fmt; use tables::{conversions, derived_property, general_category, property}; @@ -39,6 +40,8 @@ pub use core::char::{MAX, from_digit, from_u32, from_u32_unchecked}; pub use core::char::{EncodeUtf16, EncodeUtf8, EscapeDebug, EscapeDefault, EscapeUnicode}; // unstable reexports +#[unstable(feature = "try_from", issue = "33417")] +pub use core::char::CharTryFromError; #[unstable(feature = "decode_utf8", issue = "33906")] pub use core::char::{DecodeUtf8, decode_utf8}; #[unstable(feature = "unicode", issue = "27783")] @@ -62,6 +65,9 @@ impl Iterator for ToLowercase { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for ToLowercase {} + /// Returns an iterator that yields the uppercase equivalent of a `char`. /// /// This `struct` is created by the [`to_uppercase()`] method on [`char`]. See @@ -80,6 +86,8 @@ impl Iterator for ToUppercase { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for ToUppercase {} enum CaseMappingIter { Three(char, char, char), diff --git a/src/librustc_unicode/lib.rs b/src/librustc_unicode/lib.rs index 3ae905eba2..65bd717e01 100644 --- a/src/librustc_unicode/lib.rs +++ b/src/librustc_unicode/lib.rs @@ -35,8 +35,10 @@ #![feature(char_escape_debug)] #![feature(core_char_ext)] #![feature(decode_utf8)] +#![feature(fused)] #![feature(lang_items)] #![feature(staged_api)] +#![feature(try_from)] #![feature(unicode)] mod tables; diff --git a/src/librustc_unicode/u_str.rs b/src/librustc_unicode/u_str.rs index 0bac44b837..eb5b6feeb7 100644 --- a/src/librustc_unicode/u_str.rs +++ b/src/librustc_unicode/u_str.rs @@ -14,7 +14,7 @@ //! methods provided by the unicode parts of the CharExt trait. 
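The FusedIterator impls added above for ToLowercase and ToUppercase encode a guarantee those iterators already satisfy: after returning None once they keep returning None, so `Iterator::fuse` can become a no-op for them. A quick illustration of that contract, not part of the patch:

```rust
fn main() {
    // 'ß'.to_uppercase() returns the ToUppercase iterator mentioned above
    // and yields "SS"; once exhausted it keeps yielding None, which is
    // exactly what the FusedIterator marker promises.
    let mut upper = 'ß'.to_uppercase();
    assert_eq!(upper.next(), Some('S'));
    assert_eq!(upper.next(), Some('S'));
    assert_eq!(upper.next(), None);
    assert_eq!(upper.next(), None); // still None after exhaustion
}
```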
use core::char; -use core::iter::Filter; +use core::iter::{Filter, FusedIterator}; use core::str::Split; /// An iterator over the non-whitespace substrings of a string, @@ -177,6 +177,10 @@ impl Iterator for Utf16Encoder } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Utf16Encoder + where I: FusedIterator {} + impl<'a> Iterator for SplitWhitespace<'a> { type Item = &'a str; @@ -189,3 +193,6 @@ impl<'a> DoubleEndedIterator for SplitWhitespace<'a> { self.inner.next_back() } } + +#[unstable(feature = "fused", issue = "35602")] +impl<'a> FusedIterator for SplitWhitespace<'a> {} diff --git a/src/librustdoc/Cargo.toml b/src/librustdoc/Cargo.toml index 3e510bdc90..d66d2001f2 100644 --- a/src/librustdoc/Cargo.toml +++ b/src/librustdoc/Cargo.toml @@ -16,6 +16,7 @@ rustc_back = { path = "../librustc_back" } rustc_const_eval = { path = "../librustc_const_eval" } rustc_const_math = { path = "../librustc_const_math" } rustc_driver = { path = "../librustc_driver" } +rustc_data_structures = { path = "../librustc_data_structures" } rustc_errors = { path = "../librustc_errors" } rustc_lint = { path = "../librustc_lint" } rustc_metadata = { path = "../librustc_metadata" } diff --git a/src/librustdoc/clean/inline.rs b/src/librustdoc/clean/inline.rs index 3d6925041c..c4d6ff43ef 100644 --- a/src/librustdoc/clean/inline.rs +++ b/src/librustdoc/clean/inline.rs @@ -10,18 +10,17 @@ //! Support for inlining external documentation into the current AST. -use std::collections::HashSet; use std::iter::once; use syntax::ast; use rustc::hir; -use rustc::middle::cstore; use rustc::hir::def::Def; use rustc::hir::def_id::DefId; +use rustc::hir::map::DefPathData; use rustc::hir::print as pprust; -use rustc::ty::{self, TyCtxt}; -use rustc::ty::subst; +use rustc::ty::{self, TyCtxt, VariantKind}; +use rustc::util::nodemap::FnvHashSet; use rustc_const_eval::lookup_const_by_id; @@ -84,20 +83,25 @@ fn try_inline_def<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>, } Def::Struct(did) // If this is a struct constructor, we skip it - if tcx.sess.cstore.tuple_struct_definition_if_ctor(did).is_none() => { + if tcx.def_key(did).disambiguated_data.data != DefPathData::StructCtor => { record_extern_fqn(cx, did, clean::TypeStruct); ret.extend(build_impls(cx, tcx, did)); clean::StructItem(build_struct(cx, tcx, did)) } + Def::Union(did) => { + record_extern_fqn(cx, did, clean::TypeUnion); + ret.extend(build_impls(cx, tcx, did)); + clean::UnionItem(build_union(cx, tcx, did)) + } Def::TyAlias(did) => { record_extern_fqn(cx, did, clean::TypeTypedef); ret.extend(build_impls(cx, tcx, did)); - build_type(cx, tcx, did) + clean::TypedefItem(build_type_alias(cx, tcx, did), false) } Def::Enum(did) => { record_extern_fqn(cx, did, clean::TypeEnum); ret.extend(build_impls(cx, tcx, did)); - build_type(cx, tcx, did) + clean::EnumItem(build_enum(cx, tcx, did)) } // Assume that the enum type is reexported next to the variant, and // variants don't show up in documentation specially. 
@@ -161,7 +165,7 @@ pub fn build_external_trait<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tc let def = tcx.lookup_trait_def(did); let trait_items = tcx.trait_items(did).clean(cx); let predicates = tcx.lookup_predicates(did); - let generics = (&def.generics, &predicates, subst::TypeSpace).clean(cx); + let generics = (def.generics, &predicates).clean(cx); let generics = filter_non_trait_generics(did, generics); let (generics, supertrait_bounds) = separate_supertrait_bounds(generics); clean::Trait { @@ -176,7 +180,7 @@ fn build_external_function<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx did: DefId) -> clean::Function { let t = tcx.lookup_item_type(did); let (decl, style, abi) = match t.ty.sty { - ty::TyFnDef(_, _, ref f) => ((did, &f.sig).clean(cx), f.unsafety, f.abi), + ty::TyFnDef(.., ref f) => ((did, &f.sig).clean(cx), f.unsafety, f.abi), _ => panic!("bad function"), }; @@ -189,13 +193,25 @@ fn build_external_function<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx let predicates = tcx.lookup_predicates(did); clean::Function { decl: decl, - generics: (&t.generics, &predicates, subst::FnSpace).clean(cx), + generics: (t.generics, &predicates).clean(cx), unsafety: style, constness: constness, abi: abi, } } +fn build_enum<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>, + did: DefId) -> clean::Enum { + let t = tcx.lookup_item_type(did); + let predicates = tcx.lookup_predicates(did); + + clean::Enum { + generics: (t.generics, &predicates).clean(cx), + variants_stripped: false, + variants: tcx.lookup_adt_def(did).variants.clean(cx), + } +} + fn build_struct<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>, did: DefId) -> clean::Struct { let t = tcx.lookup_item_type(did); @@ -203,37 +219,40 @@ fn build_struct<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>, let variant = tcx.lookup_adt_def(did).struct_variant(); clean::Struct { - struct_type: match &variant.fields[..] { - &[] => doctree::Unit, - &[_] if variant.kind == ty::VariantKind::Tuple => doctree::Newtype, - &[..] 
if variant.kind == ty::VariantKind::Tuple => doctree::Tuple, - _ => doctree::Plain, + struct_type: match variant.kind { + VariantKind::Struct => doctree::Plain, + VariantKind::Tuple => doctree::Tuple, + VariantKind::Unit => doctree::Unit, }, - generics: (&t.generics, &predicates, subst::TypeSpace).clean(cx), + generics: (t.generics, &predicates).clean(cx), fields: variant.fields.clean(cx), fields_stripped: false, } } -fn build_type<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>, - did: DefId) -> clean::ItemEnum { +fn build_union<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>, + did: DefId) -> clean::Union { let t = tcx.lookup_item_type(did); let predicates = tcx.lookup_predicates(did); - match t.ty.sty { - ty::TyEnum(edef, _) if !tcx.sess.cstore.is_typedef(did) => { - return clean::EnumItem(clean::Enum { - generics: (&t.generics, &predicates, subst::TypeSpace).clean(cx), - variants_stripped: false, - variants: edef.variants.clean(cx), - }) - } - _ => {} + let variant = tcx.lookup_adt_def(did).struct_variant(); + + clean::Union { + struct_type: doctree::Plain, + generics: (t.generics, &predicates).clean(cx), + fields: variant.fields.clean(cx), + fields_stripped: false, } +} + +fn build_type_alias<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>, + did: DefId) -> clean::Typedef { + let t = tcx.lookup_item_type(did); + let predicates = tcx.lookup_predicates(did); - clean::TypedefItem(clean::Typedef { + clean::Typedef { type_: t.ty.clean(cx), - generics: (&t.generics, &predicates, subst::TypeSpace).clean(cx), - }, false) + generics: (t.generics, &predicates).clean(cx), + } } pub fn build_impls<'a, 'tcx>(cx: &DocContext, @@ -247,32 +266,49 @@ pub fn build_impls<'a, 'tcx>(cx: &DocContext, build_impl(cx, tcx, did, &mut impls); } } - - // If this is the first time we've inlined something from this crate, then - // we inline *all* impls from the crate into this crate. Note that there's + // If this is the first time we've inlined something from another crate, then + // we inline *all* impls from all the crates into this crate. Note that there's // currently no way for us to filter this based on type, and we likely need // many impls for a variety of reasons. // // Primarily, the impls will be used to populate the documentation for this // type being inlined, but impls can also be used when generating // documentation for primitives (no way to find those specifically). - if cx.populated_crate_impls.borrow_mut().insert(did.krate) { - for item in tcx.sess.cstore.crate_top_level_items(did.krate) { - populate_impls(cx, tcx, item.def, &mut impls); - } + if cx.populated_all_crate_impls.get() { + return impls; + } - fn populate_impls<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>, - def: cstore::DefLike, - impls: &mut Vec) { - match def { - cstore::DlImpl(did) => build_impl(cx, tcx, did, impls), - cstore::DlDef(Def::Mod(did)) => { - for item in tcx.sess.cstore.item_children(did) { - populate_impls(cx, tcx, item.def, impls) - } - } - _ => {} - } + cx.populated_all_crate_impls.set(true); + + for did in tcx.sess.cstore.implementations_of_trait(None) { + build_impl(cx, tcx, did, &mut impls); + } + + // Also try to inline primitive impls from other crates. 
+ let primitive_impls = [ + tcx.lang_items.isize_impl(), + tcx.lang_items.i8_impl(), + tcx.lang_items.i16_impl(), + tcx.lang_items.i32_impl(), + tcx.lang_items.i64_impl(), + tcx.lang_items.usize_impl(), + tcx.lang_items.u8_impl(), + tcx.lang_items.u16_impl(), + tcx.lang_items.u32_impl(), + tcx.lang_items.u64_impl(), + tcx.lang_items.f32_impl(), + tcx.lang_items.f64_impl(), + tcx.lang_items.char_impl(), + tcx.lang_items.str_impl(), + tcx.lang_items.slice_impl(), + tcx.lang_items.slice_impl(), + tcx.lang_items.const_ptr_impl() + ]; + + for def_id in primitive_impls.iter().filter_map(|&def_id| def_id) { + if !def_id.is_local() { + tcx.populate_implementations_for_primitive_if_necessary(def_id); + build_impl(cx, tcx, def_id, &mut impls); } } @@ -331,12 +367,10 @@ pub fn build_impl<'a, 'tcx>(cx: &DocContext, } let predicates = tcx.lookup_predicates(did); - let trait_items = tcx.sess.cstore.impl_items(did) + let trait_items = tcx.sess.cstore.impl_or_trait_items(did) .iter() - .filter_map(|did| { - let did = did.def_id(); - let impl_item = tcx.impl_or_trait_item(did); - match impl_item { + .filter_map(|&did| { + match tcx.impl_or_trait_item(did) { ty::ConstTraitItem(ref assoc_const) => { let did = assoc_const.def_id; let type_scheme = tcx.lookup_item_type(did); @@ -389,14 +423,14 @@ pub fn build_impl<'a, 'tcx>(cx: &DocContext, } ty::TypeTraitItem(ref assoc_ty) => { let did = assoc_ty.def_id; - let type_scheme = ty::TypeScheme { - ty: assoc_ty.ty.unwrap(), - generics: ty::Generics::empty() + let typedef = clean::Typedef { + type_: assoc_ty.ty.unwrap().clean(cx), + generics: clean::Generics { + lifetimes: vec![], + type_params: vec![], + where_predicates: vec![] + } }; - // Not sure the choice of ParamSpace actually matters here, - // because an associated type won't have generics on the LHS - let typedef = (type_scheme, ty::GenericPredicates::empty(), - subst::ParamSpace::TypeSpace).clean(cx); Some(clean::Item { name: Some(assoc_ty.name.clean(cx)), inner: clean::TypedefItem(typedef, true), @@ -426,7 +460,7 @@ pub fn build_impl<'a, 'tcx>(cx: &DocContext, .into_iter() .map(|meth| meth.name.to_string()) .collect() - }).unwrap_or(HashSet::new()); + }).unwrap_or(FnvHashSet()); ret.push(clean::Item { inner: clean::ImplItem(clean::Impl { @@ -434,9 +468,9 @@ pub fn build_impl<'a, 'tcx>(cx: &DocContext, provided_trait_methods: provided, trait_: trait_, for_: for_, - generics: (&ty.generics, &predicates, subst::TypeSpace).clean(cx), + generics: (ty.generics, &predicates).clean(cx), items: trait_items, - polarity: polarity.map(|p| { p.clean(cx) }), + polarity: Some(polarity.clean(cx)), }), source: clean::Span::empty(), name: None, @@ -462,22 +496,15 @@ fn build_module<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>, // If we're reexporting a reexport it may actually reexport something in // two namespaces, so the target may be listed twice. Make sure we only // visit each node at most once. - let mut visited = HashSet::new(); + let mut visited = FnvHashSet(); for item in tcx.sess.cstore.item_children(did) { - match item.def { - cstore::DlDef(Def::ForeignMod(did)) => { - fill_in(cx, tcx, did, items); - } - cstore::DlDef(def) if item.vis == ty::Visibility::Public => { - if !visited.insert(def) { continue } + if tcx.sess.cstore.visibility(item.def_id) == ty::Visibility::Public { + if !visited.insert(item.def_id) { continue } + if let Some(def) = tcx.sess.cstore.describe_def(item.def_id) { if let Some(i) = try_inline_def(cx, tcx, def) { items.extend(i) } } - cstore::DlDef(..) 
=> {} - // All impls were inlined above - cstore::DlImpl(..) => {} - cstore::DlField => panic!("unimplemented field"), } } } @@ -512,11 +539,32 @@ fn build_static<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>, /// its associated types as well. We specifically move these clauses to the /// associated types instead when displaying, so when we're genering the /// generics for the trait itself we need to be sure to remove them. +/// We also need to remove the implied "recursive" Self: Trait bound. /// /// The inverse of this filtering logic can be found in the `Clean` /// implementation for `AssociatedType` fn filter_non_trait_generics(trait_did: DefId, mut g: clean::Generics) -> clean::Generics { + for pred in &mut g.where_predicates { + match *pred { + clean::WherePredicate::BoundPredicate { + ty: clean::Generic(ref s), + ref mut bounds + } if *s == "Self" => { + bounds.retain(|bound| { + match *bound { + clean::TyParamBound::TraitBound(clean::PolyTrait { + trait_: clean::ResolvedPath { did, .. }, + .. + }, _) => did != trait_did, + _ => true + } + }); + } + _ => {} + } + } + g.where_predicates.retain(|pred| { match *pred { clean::WherePredicate::BoundPredicate { @@ -524,8 +572,8 @@ fn filter_non_trait_generics(trait_did: DefId, mut g: clean::Generics) self_type: box clean::Generic(ref s), trait_: box clean::ResolvedPath { did, .. }, name: ref _name, - }, .. - } => *s != "Self" || did != trait_did, + }, ref bounds + } => !(*s == "Self" && did == trait_did) && !bounds.is_empty(), _ => true, } }); diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs index 99d2732c4b..0ae059509b 100644 --- a/src/librustdoc/clean/mod.rs +++ b/src/librustdoc/clean/mod.rs @@ -12,7 +12,6 @@ //! that clean them. pub use self::Type::*; -pub use self::PrimitiveType::*; pub use self::TypeKind::*; pub use self::VariantKind::*; pub use self::Mutability::*; @@ -27,27 +26,25 @@ pub use self::Visibility::*; use syntax::abi::Abi; use syntax::ast; use syntax::attr; -use syntax::attr::{AttributeMethods, AttrMetaMethods}; use syntax::codemap::Spanned; -use syntax::parse::token::{self, InternedString, keywords}; +use syntax::parse::token::keywords; use syntax::ptr::P; +use syntax::print::pprust as syntax_pprust; use syntax_pos::{self, DUMMY_SP, Pos}; use rustc_trans::back::link; -use rustc::middle::cstore; use rustc::middle::privacy::AccessLevels; use rustc::middle::resolve_lifetime::DefRegion::*; use rustc::hir::def::Def; -use rustc::hir::def_id::{DefId, DefIndex, CRATE_DEF_INDEX}; -use rustc::hir::fold::Folder; +use rustc::hir::def_id::{self, DefId, DefIndex, CRATE_DEF_INDEX}; use rustc::hir::print as pprust; -use rustc::ty::subst::{self, ParamSpace, VecPerParamSpace}; -use rustc::ty; +use rustc::ty::subst::Substs; +use rustc::ty::{self, AdtKind}; use rustc::middle::stability; +use rustc::util::nodemap::{FnvHashMap, FnvHashSet}; use rustc::hir; -use std::collections::{HashMap, HashSet}; use std::path::PathBuf; use std::rc::Rc; use std::sync::Arc; @@ -82,12 +79,6 @@ impl, U> Clean> for [T] { } } -impl, U> Clean> for VecPerParamSpace { - fn clean(&self, cx: &DocContext) -> VecPerParamSpace { - self.map(|x| x.clean(cx)) - } -} - impl, U> Clean for P { fn clean(&self, cx: &DocContext) -> U { (**self).clean(cx) @@ -123,15 +114,15 @@ pub struct Crate { pub name: String, pub src: PathBuf, pub module: Option, - pub externs: Vec<(ast::CrateNum, ExternalCrate)>, + pub externs: Vec<(def_id::CrateNum, ExternalCrate)>, pub primitives: Vec, pub access_levels: Arc>, // These are later on moved into `CACHEKEY`, 
leaving the map empty. // Only here so that they can be filtered through the rustdoc passes. - pub external_traits: HashMap, + pub external_traits: FnvHashMap, } -struct CrateNum(ast::CrateNum); +struct CrateNum(def_id::CrateNum); impl<'a, 'tcx> Clean for visit_ast::RustdocVisitor<'a, 'tcx> { fn clean(&self, cx: &DocContext) -> Crate { @@ -141,6 +132,8 @@ impl<'a, 'tcx> Clean for visit_ast::RustdocVisitor<'a, 'tcx> { if let Some(t) = cx.tcx_opt() { cx.deref_trait_did.set(t.lang_items.deref_trait()); cx.renderinfo.borrow_mut().deref_trait_did = cx.deref_trait_did.get(); + cx.deref_mut_trait_did.set(t.lang_items.deref_mut_trait()); + cx.renderinfo.borrow_mut().deref_mut_trait_did = cx.deref_mut_trait_did.get(); } let mut externs = Vec::new(); @@ -244,19 +237,16 @@ pub struct ExternalCrate { impl Clean for CrateNum { fn clean(&self, cx: &DocContext) -> ExternalCrate { let mut primitives = Vec::new(); + let root = DefId { krate: self.0, index: CRATE_DEF_INDEX }; cx.tcx_opt().map(|tcx| { - for item in tcx.sess.cstore.crate_top_level_items(self.0) { - let did = match item.def { - cstore::DlDef(Def::Mod(did)) => did, - _ => continue - }; - let attrs = inline::load_attrs(cx, tcx, did); + for item in tcx.sess.cstore.item_children(root) { + let attrs = inline::load_attrs(cx, tcx, item.def_id); PrimitiveType::find(&attrs).map(|prim| primitives.push(prim)); } }); ExternalCrate { name: (&cx.sess().cstore.crate_name(self.0)[..]).to_owned(), - attrs: cx.sess().cstore.crate_attrs(self.0).clean(cx), + attrs: cx.sess().cstore.item_attrs(root).clean(cx), primitives: primitives, } } @@ -293,34 +283,34 @@ impl Item { } } pub fn is_mod(&self) -> bool { - ItemType::from_item(self) == ItemType::Module + ItemType::from(self) == ItemType::Module } pub fn is_trait(&self) -> bool { - ItemType::from_item(self) == ItemType::Trait + ItemType::from(self) == ItemType::Trait } pub fn is_struct(&self) -> bool { - ItemType::from_item(self) == ItemType::Struct + ItemType::from(self) == ItemType::Struct } pub fn is_enum(&self) -> bool { - ItemType::from_item(self) == ItemType::Module + ItemType::from(self) == ItemType::Module } pub fn is_fn(&self) -> bool { - ItemType::from_item(self) == ItemType::Function + ItemType::from(self) == ItemType::Function } pub fn is_associated_type(&self) -> bool { - ItemType::from_item(self) == ItemType::AssociatedType + ItemType::from(self) == ItemType::AssociatedType } pub fn is_associated_const(&self) -> bool { - ItemType::from_item(self) == ItemType::AssociatedConst + ItemType::from(self) == ItemType::AssociatedConst } pub fn is_method(&self) -> bool { - ItemType::from_item(self) == ItemType::Method + ItemType::from(self) == ItemType::Method } pub fn is_ty_method(&self) -> bool { - ItemType::from_item(self) == ItemType::TyMethod + ItemType::from(self) == ItemType::TyMethod } pub fn is_primitive(&self) -> bool { - ItemType::from_item(self) == ItemType::Primitive + ItemType::from(self) == ItemType::Primitive } pub fn is_stripped(&self) -> bool { match self.inner { StrippedItem(..) 
=> true, _ => false } @@ -328,6 +318,7 @@ impl Item { pub fn has_stripped_fields(&self) -> Option { match self.inner { StructItem(ref _struct) => Some(_struct.fields_stripped), + UnionItem(ref union) => Some(union.fields_stripped), VariantItem(Variant { kind: StructVariant(ref vstruct)} ) => { Some(vstruct.fields_stripped) }, @@ -358,6 +349,7 @@ pub enum ItemEnum { ExternCrateItem(String, Option), ImportItem(Import), StructItem(Struct), + UnionItem(Union), EnumItem(Enum), FunctionItem(Function), ModuleItem(Module), @@ -386,6 +378,23 @@ pub enum ItemEnum { StrippedItem(Box), } +impl ItemEnum { + pub fn generics(&self) -> Option<&Generics> { + Some(match *self { + ItemEnum::StructItem(ref s) => &s.generics, + ItemEnum::EnumItem(ref e) => &e.generics, + ItemEnum::FunctionItem(ref f) => &f.generics, + ItemEnum::TypedefItem(ref t, _) => &t.generics, + ItemEnum::TraitItem(ref t) => &t.generics, + ItemEnum::ImplItem(ref i) => &i.generics, + ItemEnum::TyMethodItem(ref i) => &i.generics, + ItemEnum::MethodItem(ref i) => &i.generics, + ItemEnum::ForeignFunctionItem(ref f) => &f.generics, + _ => return None, + }) + } +} + #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub struct Module { pub items: Vec, @@ -404,6 +413,7 @@ impl Clean for doctree::Module { items.extend(self.extern_crates.iter().map(|x| x.clean(cx))); items.extend(self.imports.iter().flat_map(|x| x.clean(cx))); items.extend(self.structs.iter().map(|x| x.clean(cx))); + items.extend(self.unions.iter().map(|x| x.clean(cx))); items.extend(self.enums.iter().map(|x| x.clean(cx))); items.extend(self.fns.iter().map(|x| x.clean(cx))); items.extend(self.foreigns.iter().flat_map(|x| x.clean(cx))); @@ -491,11 +501,24 @@ impl Attributes for [Attribute] { } } +/// This is a flattened version of the AST's Attribute + MetaItem. #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)] pub enum Attribute { Word(String), List(String, Vec), - NameValue(String, String) + NameValue(String, String), + Literal(String), +} + +impl Clean for ast::NestedMetaItem { + fn clean(&self, cx: &DocContext) -> Attribute { + if let Some(mi) = self.meta_item() { + mi.clean(cx) + } else { // must be a literal + let lit = self.literal().unwrap(); + Literal(syntax_pprust::lit_to_string(lit)) + } + } } impl Clean for ast::MetaItem { @@ -517,50 +540,6 @@ impl Clean for ast::Attribute { } } -// This is a rough approximation that gets us what we want. -impl attr::AttrMetaMethods for Attribute { - fn name(&self) -> InternedString { - match *self { - Word(ref n) | List(ref n, _) | NameValue(ref n, _) => { - token::intern_and_get_ident(n) - } - } - } - - fn value_str(&self) -> Option { - match *self { - NameValue(_, ref v) => { - Some(token::intern_and_get_ident(v)) - } - _ => None, - } - } - fn meta_item_list<'a>(&'a self) -> Option<&'a [P]> { None } - - fn is_word(&self) -> bool { - match *self { - Word(_) => true, - _ => false, - } - } - - fn is_value_str(&self) -> bool { - match *self { - NameValue(..) => true, - _ => false, - } - } - - fn is_meta_item_list(&self) -> bool { - match *self { - List(..) 
=> true, - _ => false, - } - } - - fn span(&self) -> syntax_pos::Span { unimplemented!() } -} - #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)] pub struct TyParam { pub name: String, @@ -630,33 +609,10 @@ impl Clean for hir::TyParamBound { } } -impl<'tcx> Clean<(Vec, Vec)> for ty::ExistentialBounds<'tcx> { - fn clean(&self, cx: &DocContext) -> (Vec, Vec) { - let mut tp_bounds = vec![]; - self.region_bound.clean(cx).map(|b| tp_bounds.push(RegionBound(b))); - for bb in &self.builtin_bounds { - tp_bounds.push(bb.clean(cx)); - } - - let mut bindings = vec![]; - for &ty::Binder(ref pb) in &self.projection_bounds { - bindings.push(TypeBinding { - name: pb.projection_ty.item_name.clean(cx), - ty: pb.ty.clean(cx) - }); - } - - (tp_bounds, bindings) - } -} - -fn external_path_params(cx: &DocContext, trait_did: Option, - bindings: Vec, substs: &subst::Substs) -> PathParameters { - let lifetimes = substs.regions.get_slice(subst::TypeSpace) - .iter() - .filter_map(|v| v.clean(cx)) - .collect(); - let types = substs.types.get_slice(subst::TypeSpace).to_vec(); +fn external_path_params(cx: &DocContext, trait_did: Option, has_self: bool, + bindings: Vec, substs: &Substs) -> PathParameters { + let lifetimes = substs.regions().filter_map(|v| v.clean(cx)).collect(); + let types = substs.types().skip(has_self as usize).collect::>(); match (trait_did, cx.tcx_opt()) { // Attempt to sugar an external path like Fn<(A, B,), C> to Fn(A, B) -> C @@ -683,7 +639,7 @@ fn external_path_params(cx: &DocContext, trait_did: Option, output: output } }, - (_, _) => { + (..) => { PathParameters::AngleBracketed { lifetimes: lifetimes, types: types.clean(cx), @@ -695,13 +651,13 @@ fn external_path_params(cx: &DocContext, trait_did: Option, // trait_did should be set to a trait's DefId if called on a TraitRef, in order to sugar // from Fn<(A, B,), C> to Fn(A, B) -> C -fn external_path(cx: &DocContext, name: &str, trait_did: Option, - bindings: Vec, substs: &subst::Substs) -> Path { +fn external_path(cx: &DocContext, name: &str, trait_did: Option, has_self: bool, + bindings: Vec, substs: &Substs) -> Path { Path { global: false, segments: vec![PathSegment { name: name.to_string(), - params: external_path_params(cx, trait_did, bindings, substs) + params: external_path_params(cx, trait_did, has_self, bindings, substs) }], } } @@ -712,20 +668,20 @@ impl Clean for ty::BuiltinBound { Some(tcx) => tcx, None => return RegionBound(Lifetime::statik()) }; - let empty = subst::Substs::empty(); + let empty = Substs::empty(tcx); let (did, path) = match *self { ty::BoundSend => (tcx.lang_items.send_trait().unwrap(), - external_path(cx, "Send", None, vec![], &empty)), + external_path(cx, "Send", None, false, vec![], empty)), ty::BoundSized => (tcx.lang_items.sized_trait().unwrap(), - external_path(cx, "Sized", None, vec![], &empty)), + external_path(cx, "Sized", None, false, vec![], empty)), ty::BoundCopy => (tcx.lang_items.copy_trait().unwrap(), - external_path(cx, "Copy", None, vec![], &empty)), + external_path(cx, "Copy", None, false, vec![], empty)), ty::BoundSync => (tcx.lang_items.sync_trait().unwrap(), - external_path(cx, "Sync", None, vec![], &empty)), + external_path(cx, "Sync", None, false, vec![], empty)), }; inline::record_extern_fqn(cx, did, TypeTrait); TraitBound(PolyTrait { @@ -748,18 +704,17 @@ impl<'tcx> Clean for ty::TraitRef<'tcx> { }; inline::record_extern_fqn(cx, self.def_id, TypeTrait); let path = external_path(cx, &tcx.item_name(self.def_id).as_str(), - Some(self.def_id), vec![], self.substs); + 
Some(self.def_id), true, vec![], self.substs); - debug!("ty::TraitRef\n substs.types(TypeSpace): {:?}\n", - self.substs.types.get_slice(ParamSpace::TypeSpace)); + debug!("ty::TraitRef\n subst: {:?}\n", self.substs); // collect any late bound regions let mut late_bounds = vec![]; - for &ty_s in self.substs.types.get_slice(ParamSpace::TypeSpace) { + for ty_s in self.input_types().skip(1) { if let ty::TyTuple(ts) = ty_s.sty { for &ty_s in ts { if let ty::TyRef(ref reg, _) = ty_s.sty { - if let &ty::Region::ReLateBound(_, _) = *reg { + if let &ty::Region::ReLateBound(..) = *reg { debug!(" hit an ReLateBound {:?}", reg); if let Some(lt) = reg.clean(cx) { late_bounds.push(lt); @@ -785,11 +740,12 @@ impl<'tcx> Clean for ty::TraitRef<'tcx> { } } -impl<'tcx> Clean>> for subst::Substs<'tcx> { +impl<'tcx> Clean>> for Substs<'tcx> { fn clean(&self, cx: &DocContext) -> Option> { let mut v = Vec::new(); - v.extend(self.regions.iter().filter_map(|r| r.clean(cx)).map(RegionBound)); - v.extend(self.types.iter().map(|t| TraitBound(PolyTrait { + v.extend(self.regions().filter_map(|r| r.clean(cx)) + .map(RegionBound)); + v.extend(self.types().map(|t| TraitBound(PolyTrait { trait_: t.clean(cx), lifetimes: vec![] }, hir::TraitBoundModifier::None))); @@ -813,7 +769,20 @@ impl Lifetime { } impl Clean for hir::Lifetime { - fn clean(&self, _: &DocContext) -> Lifetime { + fn clean(&self, cx: &DocContext) -> Lifetime { + if let Some(tcx) = cx.tcx_opt() { + let def = tcx.named_region_map.defs.get(&self.id).cloned(); + match def { + Some(DefEarlyBoundRegion(_, node_id)) | + Some(DefLateBoundRegion(_, node_id)) | + Some(DefFreeRegion(_, node_id)) => { + if let Some(lt) = cx.lt_substs.borrow().get(&node_id).cloned() { + return lt; + } + } + _ => {} + } + } Lifetime(self.name.to_string()) } } @@ -834,7 +803,7 @@ impl Clean for hir::LifetimeDef { } } -impl Clean for ty::RegionParameterDef { +impl<'tcx> Clean for ty::RegionParameterDef<'tcx> { fn clean(&self, _: &DocContext) -> Lifetime { Lifetime(self.name.to_string()) } @@ -902,7 +871,6 @@ impl<'a> Clean for ty::Predicate<'a> { Predicate::WellFormed(_) => panic!("not user writable"), Predicate::ObjectSafe(_) => panic!("not user writable"), Predicate::ClosureKind(..) => panic!("not user writable"), - Predicate::Rfc1592(..) 
=> panic!("not user writable"), } } } @@ -910,7 +878,7 @@ impl<'a> Clean for ty::Predicate<'a> { impl<'a> Clean for ty::TraitPredicate<'a> { fn clean(&self, cx: &DocContext) -> WherePredicate { WherePredicate::BoundPredicate { - ty: self.trait_ref.substs.self_ty().clean(cx).unwrap(), + ty: self.trait_ref.self_ty().clean(cx), bounds: vec![self.trait_ref.clean(cx)] } } @@ -926,7 +894,7 @@ impl<'tcx> Clean for ty::EquatePredicate<'tcx> { } } -impl Clean for ty::OutlivesPredicate { +impl<'tcx> Clean for ty::OutlivesPredicate<&'tcx ty::Region, &'tcx ty::Region> { fn clean(&self, cx: &DocContext) -> WherePredicate { let ty::OutlivesPredicate(ref a, ref b) = *self; WherePredicate::RegionPredicate { @@ -936,7 +904,7 @@ impl Clean for ty::OutlivesPredicate { } } -impl<'tcx> Clean for ty::OutlivesPredicate, ty::Region> { +impl<'tcx> Clean for ty::OutlivesPredicate, &'tcx ty::Region> { fn clean(&self, cx: &DocContext) -> WherePredicate { let ty::OutlivesPredicate(ref ty, ref lt) = *self; @@ -991,27 +959,30 @@ impl Clean for hir::Generics { } impl<'a, 'tcx> Clean for (&'a ty::Generics<'tcx>, - &'a ty::GenericPredicates<'tcx>, - subst::ParamSpace) { + &'a ty::GenericPredicates<'tcx>) { fn clean(&self, cx: &DocContext) -> Generics { use self::WherePredicate as WP; - let (gens, preds, space) = *self; + let (gens, preds) = *self; // Bounds in the type_params and lifetimes fields are repeated in the // predicates field (see rustc_typeck::collect::ty_generics), so remove // them. - let stripped_typarams = gens.types.get_slice(space).iter().map(|tp| { - tp.clean(cx) + let stripped_typarams = gens.types.iter().filter_map(|tp| { + if tp.name == keywords::SelfType.name() { + assert_eq!(tp.index, 0); + None + } else { + Some(tp.clean(cx)) + } }).collect::>(); - let stripped_lifetimes = gens.regions.get_slice(space).iter().map(|rp| { + let stripped_lifetimes = gens.regions.iter().map(|rp| { let mut srp = rp.clone(); srp.bounds = Vec::new(); srp.clean(cx) }).collect::>(); - let mut where_predicates = preds.predicates.get_slice(space) - .to_vec().clean(cx); + let mut where_predicates = preds.predicates.to_vec().clean(cx); // Type parameters and have a Sized bound by default unless removed with // ?Sized. 
Scan through the predicates and mark any type parameter with @@ -1020,7 +991,7 @@ impl<'a, 'tcx> Clean for (&'a ty::Generics<'tcx>, // Note that associated types also have a sized bound by default, but we // don't actually know the set of associated types right here so that's // handled in cleaning associated types - let mut sized_params = HashSet::new(); + let mut sized_params = FnvHashSet(); where_predicates.retain(|pred| { match *pred { WP::BoundPredicate { ty: Generic(ref g), ref bounds } => { @@ -1156,6 +1127,10 @@ impl FnDecl { pub fn has_self(&self) -> bool { return self.inputs.values.len() > 0 && self.inputs.values[0].name == "self"; } + + pub fn self_type(&self) -> Option { + self.inputs.values.get(0).and_then(|v| v.to_self()) + } } #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)] @@ -1182,7 +1157,7 @@ impl<'a, 'tcx> Clean for (DefId, &'a ty::PolyFnSig<'tcx>) { let mut names = if cx.map.as_local_node_id(did).is_some() { vec![].into_iter() } else { - cx.tcx().sess.cstore.method_arg_names(did).into_iter() + cx.tcx().sess.cstore.fn_arg_names(did).into_iter() }.peekable(); FnDecl { output: Return(sig.0.output.clean(cx)), @@ -1192,8 +1167,8 @@ impl<'a, 'tcx> Clean for (DefId, &'a ty::PolyFnSig<'tcx>) { values: sig.0.inputs.iter().map(|t| { Argument { type_: t.clean(cx), - id: 0, - name: names.next().unwrap_or("".to_string()), + id: ast::CRATE_NODE_ID, + name: names.next().map_or("".to_string(), |name| name.to_string()), } }).collect(), }, @@ -1363,8 +1338,7 @@ impl Clean for hir::ImplItem { impl<'tcx> Clean for ty::Method<'tcx> { fn clean(&self, cx: &DocContext) -> Item { - let generics = (&self.generics, &self.predicates, - subst::FnSpace).clean(cx); + let generics = (self.generics, &self.predicates).clean(cx); let mut decl = (self.def_id, &self.fty.sig).clean(cx); match self.explicit_self { ty::ExplicitSelfCategory::ByValue => { @@ -1495,8 +1469,8 @@ pub enum PrimitiveType { Str, Slice, Array, - PrimitiveTuple, - PrimitiveRawPointer, + Tuple, + RawPointer, } #[derive(Clone, RustcEncodable, RustcDecodable, Copy, Debug)] @@ -1507,6 +1481,7 @@ pub enum TypeKind { TypeConst, TypeStatic, TypeStruct, + TypeUnion, TypeTrait, TypeVariant, TypeTypedef, @@ -1526,12 +1501,12 @@ impl Type { pub fn primitive_type(&self) -> Option { match *self { Primitive(p) | BorrowedRef { type_: box Primitive(p), ..} => Some(p), - Vector(..) | BorrowedRef{ type_: box Vector(..), .. } => Some(Slice), + Vector(..) | BorrowedRef{ type_: box Vector(..), .. } => Some(PrimitiveType::Slice), FixedVector(..) | BorrowedRef { type_: box FixedVector(..), .. } => { - Some(Array) + Some(PrimitiveType::Array) } - Tuple(..) => Some(PrimitiveTuple), - RawPointer(..) => Some(PrimitiveRawPointer), + Tuple(..) => Some(PrimitiveType::Tuple), + RawPointer(..) 
=> Some(PrimitiveType::RawPointer), _ => None, } } @@ -1556,25 +1531,25 @@ impl GetDefId for Type { impl PrimitiveType { fn from_str(s: &str) -> Option { match s { - "isize" => Some(Isize), - "i8" => Some(I8), - "i16" => Some(I16), - "i32" => Some(I32), - "i64" => Some(I64), - "usize" => Some(Usize), - "u8" => Some(U8), - "u16" => Some(U16), - "u32" => Some(U32), - "u64" => Some(U64), - "bool" => Some(Bool), - "char" => Some(Char), - "str" => Some(Str), - "f32" => Some(F32), - "f64" => Some(F64), - "array" => Some(Array), - "slice" => Some(Slice), - "tuple" => Some(PrimitiveTuple), - "pointer" => Some(PrimitiveRawPointer), + "isize" => Some(PrimitiveType::Isize), + "i8" => Some(PrimitiveType::I8), + "i16" => Some(PrimitiveType::I16), + "i32" => Some(PrimitiveType::I32), + "i64" => Some(PrimitiveType::I64), + "usize" => Some(PrimitiveType::Usize), + "u8" => Some(PrimitiveType::U8), + "u16" => Some(PrimitiveType::U16), + "u32" => Some(PrimitiveType::U32), + "u64" => Some(PrimitiveType::U64), + "bool" => Some(PrimitiveType::Bool), + "char" => Some(PrimitiveType::Char), + "str" => Some(PrimitiveType::Str), + "f32" => Some(PrimitiveType::F32), + "f64" => Some(PrimitiveType::F64), + "array" => Some(PrimitiveType::Array), + "slice" => Some(PrimitiveType::Slice), + "tuple" => Some(PrimitiveType::Tuple), + "pointer" => Some(PrimitiveType::RawPointer), _ => None, } } @@ -1594,25 +1569,25 @@ impl PrimitiveType { pub fn to_string(&self) -> &'static str { match *self { - Isize => "isize", - I8 => "i8", - I16 => "i16", - I32 => "i32", - I64 => "i64", - Usize => "usize", - U8 => "u8", - U16 => "u16", - U32 => "u32", - U64 => "u64", - F32 => "f32", - F64 => "f64", - Str => "str", - Bool => "bool", - Char => "char", - Array => "array", - Slice => "slice", - PrimitiveTuple => "tuple", - PrimitiveRawPointer => "pointer", + PrimitiveType::Isize => "isize", + PrimitiveType::I8 => "i8", + PrimitiveType::I16 => "i16", + PrimitiveType::I32 => "i32", + PrimitiveType::I64 => "i64", + PrimitiveType::Usize => "usize", + PrimitiveType::U8 => "u8", + PrimitiveType::U16 => "u16", + PrimitiveType::U32 => "u32", + PrimitiveType::U64 => "u64", + PrimitiveType::F32 => "f32", + PrimitiveType::F64 => "f64", + PrimitiveType::Str => "str", + PrimitiveType::Bool => "bool", + PrimitiveType::Char => "char", + PrimitiveType::Array => "array", + PrimitiveType::Slice => "slice", + PrimitiveType::Tuple => "tuple", + PrimitiveType::RawPointer => "pointer", } } @@ -1629,40 +1604,36 @@ impl PrimitiveType { } } - -// Poor man's type parameter substitution at HIR level. -// Used to replace private type aliases in public signatures with their aliased types. -struct SubstAlias<'a, 'tcx: 'a> { - tcx: &'a ty::TyCtxt<'a, 'tcx, 'tcx>, - // Table type parameter definition -> substituted type - ty_substs: HashMap, - // Table node id of lifetime parameter definition -> substituted lifetime - lt_substs: HashMap, +impl From for PrimitiveType { + fn from(int_ty: ast::IntTy) -> PrimitiveType { + match int_ty { + ast::IntTy::Is => PrimitiveType::Isize, + ast::IntTy::I8 => PrimitiveType::I8, + ast::IntTy::I16 => PrimitiveType::I16, + ast::IntTy::I32 => PrimitiveType::I32, + ast::IntTy::I64 => PrimitiveType::I64, + } + } } -impl<'a, 'tcx: 'a, 'b: 'tcx> Folder for SubstAlias<'a, 'tcx> { - fn fold_ty(&mut self, ty: P) -> P { - if let hir::TyPath(..) 
= ty.node { - let def = self.tcx.expect_def(ty.id); - if let Some(new_ty) = self.ty_substs.get(&def).cloned() { - return P(new_ty); - } +impl From for PrimitiveType { + fn from(uint_ty: ast::UintTy) -> PrimitiveType { + match uint_ty { + ast::UintTy::Us => PrimitiveType::Usize, + ast::UintTy::U8 => PrimitiveType::U8, + ast::UintTy::U16 => PrimitiveType::U16, + ast::UintTy::U32 => PrimitiveType::U32, + ast::UintTy::U64 => PrimitiveType::U64, } - hir::fold::noop_fold_ty(ty, self) } - fn fold_lifetime(&mut self, lt: hir::Lifetime) -> hir::Lifetime { - let def = self.tcx.named_region_map.defs.get(<.id).cloned(); - match def { - Some(DefEarlyBoundRegion(_, _, node_id)) | - Some(DefLateBoundRegion(_, node_id)) | - Some(DefFreeRegion(_, node_id)) => { - if let Some(lt) = self.lt_substs.get(&node_id).cloned() { - return lt; - } - } - _ => {} +} + +impl From for PrimitiveType { + fn from(float_ty: ast::FloatTy) -> PrimitiveType { + match float_ty { + ast::FloatTy::F32 => PrimitiveType::F32, + ast::FloatTy::F64 => PrimitiveType::F64, } - hir::fold::noop_fold_lifetime(lt, self) } } @@ -1697,43 +1668,47 @@ impl Clean for hir::Ty { }, TyTup(ref tys) => Tuple(tys.clean(cx)), TyPath(None, ref path) => { - if let Some(tcx) = cx.tcx_opt() { - // Substitute private type aliases - let def = tcx.expect_def(self.id); + let tcx_and_def = cx.tcx_opt().map(|tcx| (tcx, tcx.expect_def(self.id))); + if let Some((_, def)) = tcx_and_def { + if let Some(new_ty) = cx.ty_substs.borrow().get(&def).cloned() { + return new_ty; + } + } + + let tcx_and_alias = tcx_and_def.and_then(|(tcx, def)| { if let Def::TyAlias(def_id) = def { - if let Some(node_id) = tcx.map.as_local_node_id(def_id) { + // Substitute private type aliases + tcx.map.as_local_node_id(def_id).and_then(|node_id| { if !cx.access_levels.borrow().is_exported(def_id) { - let item = tcx.map.expect_item(node_id); - if let hir::ItemTy(ref ty, ref generics) = item.node { - let provided_params = &path.segments.last().unwrap().parameters; - let mut ty_substs = HashMap::new(); - let mut lt_substs = HashMap::new(); - for (i, ty_param) in generics.ty_params.iter().enumerate() { - let ty_param_def = tcx.expect_def(ty_param.id); - if let Some(ty) = provided_params.types().get(i).cloned() - .cloned() { - ty_substs.insert(ty_param_def, ty.unwrap()); - } else if let Some(default) = ty_param.default.clone() { - ty_substs.insert(ty_param_def, default.unwrap()); - } - } - for (i, lt_param) in generics.lifetimes.iter().enumerate() { - if let Some(lt) = provided_params.lifetimes().get(i) - .cloned() - .cloned() { - lt_substs.insert(lt_param.lifetime.id, lt); - } - } - let mut subst_alias = SubstAlias { - tcx: &tcx, - ty_substs: ty_substs, - lt_substs: lt_substs - }; - return subst_alias.fold_ty(ty.clone()).clean(cx); - } + Some((tcx, &tcx.map.expect_item(node_id).node)) + } else { + None } + }) + } else { + None + } + }); + if let Some((tcx, &hir::ItemTy(ref ty, ref generics))) = tcx_and_alias { + let provided_params = &path.segments.last().unwrap().parameters; + let mut ty_substs = FnvHashMap(); + let mut lt_substs = FnvHashMap(); + for (i, ty_param) in generics.ty_params.iter().enumerate() { + let ty_param_def = tcx.expect_def(ty_param.id); + if let Some(ty) = provided_params.types().get(i).cloned() + .cloned() { + ty_substs.insert(ty_param_def, ty.unwrap().clean(cx)); + } else if let Some(default) = ty_param.default.clone() { + ty_substs.insert(ty_param_def, default.unwrap().clean(cx)); } } + for (i, lt_param) in generics.lifetimes.iter().enumerate() { + if let Some(lt) = 
provided_params.lifetimes().get(i).cloned() + .cloned() { + lt_substs.insert(lt_param.lifetime.id, lt.clean(cx)); + } + } + return cx.enter_alias(ty_substs, lt_substs, || ty.clean(cx)); } resolve_type(cx, path.clean(cx), self.id) } @@ -1780,21 +1755,12 @@ impl<'tcx> Clean for ty::Ty<'tcx> { fn clean(&self, cx: &DocContext) -> Type { match self.sty { ty::TyNever => Never, - ty::TyBool => Primitive(Bool), - ty::TyChar => Primitive(Char), - ty::TyInt(ast::IntTy::Is) => Primitive(Isize), - ty::TyInt(ast::IntTy::I8) => Primitive(I8), - ty::TyInt(ast::IntTy::I16) => Primitive(I16), - ty::TyInt(ast::IntTy::I32) => Primitive(I32), - ty::TyInt(ast::IntTy::I64) => Primitive(I64), - ty::TyUint(ast::UintTy::Us) => Primitive(Usize), - ty::TyUint(ast::UintTy::U8) => Primitive(U8), - ty::TyUint(ast::UintTy::U16) => Primitive(U16), - ty::TyUint(ast::UintTy::U32) => Primitive(U32), - ty::TyUint(ast::UintTy::U64) => Primitive(U64), - ty::TyFloat(ast::FloatTy::F32) => Primitive(F32), - ty::TyFloat(ast::FloatTy::F64) => Primitive(F64), - ty::TyStr => Primitive(Str), + ty::TyBool => Primitive(PrimitiveType::Bool), + ty::TyChar => Primitive(PrimitiveType::Char), + ty::TyInt(int_ty) => Primitive(int_ty.into()), + ty::TyUint(uint_ty) => Primitive(uint_ty.into()), + ty::TyFloat(float_ty) => Primitive(float_ty.into()), + ty::TyStr => Primitive(PrimitiveType::Str), ty::TyBox(t) => { let box_did = cx.tcx_opt().and_then(|tcx| { tcx.lang_items.owned_box() @@ -1810,7 +1776,7 @@ impl<'tcx> Clean for ty::Ty<'tcx> { mutability: mt.mutbl.clean(cx), type_: box mt.ty.clean(cx), }, - ty::TyFnDef(_, _, ref fty) | + ty::TyFnDef(.., ref fty) | ty::TyFnPtr(ref fty) => BareFunction(box BareFunctionDecl { unsafety: fty.unsafety, generics: Generics { @@ -1818,19 +1784,19 @@ impl<'tcx> Clean for ty::Ty<'tcx> { type_params: Vec::new(), where_predicates: Vec::new() }, - decl: (cx.map.local_def_id(0), &fty.sig).clean(cx), + decl: (cx.map.local_def_id(ast::CRATE_NODE_ID), &fty.sig).clean(cx), abi: fty.abi, }), - ty::TyStruct(def, substs) | - ty::TyEnum(def, substs) => { + ty::TyAdt(def, substs) => { let did = def.did; - let kind = match self.sty { - ty::TyStruct(..) 
=> TypeStruct, - _ => TypeEnum, + let kind = match def.adt_kind() { + AdtKind::Struct => TypeStruct, + AdtKind::Union => TypeUnion, + AdtKind::Enum => TypeEnum, }; inline::record_extern_fqn(cx, did, kind); let path = external_path(cx, &cx.tcx().item_name(did).as_str(), - None, vec![], substs); + None, false, vec![], substs); ResolvedPath { path: path, typarams: None, @@ -1838,12 +1804,26 @@ impl<'tcx> Clean for ty::Ty<'tcx> { is_generic: false, } } - ty::TyTrait(box ty::TraitTy { ref principal, ref bounds }) => { - let did = principal.def_id(); + ty::TyTrait(ref obj) => { + let did = obj.principal.def_id(); inline::record_extern_fqn(cx, did, TypeTrait); - let (typarams, bindings) = bounds.clean(cx); + + let mut typarams = vec![]; + obj.region_bound.clean(cx).map(|b| typarams.push(RegionBound(b))); + for bb in &obj.builtin_bounds { + typarams.push(bb.clean(cx)); + } + + let mut bindings = vec![]; + for &ty::Binder(ref pb) in &obj.projection_bounds { + bindings.push(TypeBinding { + name: pb.item_name.clean(cx), + ty: pb.ty.clean(cx) + }); + } + let path = external_path(cx, &cx.tcx().item_name(did).as_str(), - Some(did), bindings, principal.substs()); + Some(did), false, bindings, obj.principal.0.substs); ResolvedPath { path: path, typarams: Some(typarams), @@ -1863,8 +1843,7 @@ impl<'tcx> Clean for ty::Ty<'tcx> { let item_predicates = cx.tcx().lookup_predicates(def_id); let substs = cx.tcx().lift(&substs).unwrap(); let bounds = item_predicates.instantiate(cx.tcx(), substs); - let predicates = bounds.predicates.into_vec(); - ImplTrait(predicates.into_iter().filter_map(|predicate| { + ImplTrait(bounds.predicates.into_iter().filter_map(|predicate| { predicate.to_opt_poly_trait_ref().clean(cx) }).collect()) } @@ -1894,11 +1873,9 @@ impl Clean for hir::StructField { impl<'tcx> Clean for ty::FieldDefData<'tcx, 'static> { fn clean(&self, cx: &DocContext) -> Item { - // FIXME: possible O(n^2)-ness! Not my fault. - let attr_map = cx.tcx().sess.cstore.crate_struct_field_attrs(self.did.krate); Item { name: Some(self.name).clean(cx), - attrs: attr_map.get(&self.did).unwrap_or(&Vec::new()).clean(cx), + attrs: cx.tcx().get_attrs(self.did).clean(cx), source: Span::empty(), visibility: self.vis.clean(cx), stability: get_stability(cx, self.did), @@ -1935,6 +1912,14 @@ pub struct Struct { pub fields_stripped: bool, } +#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +pub struct Union { + pub struct_type: doctree::StructType, + pub generics: Generics, + pub fields: Vec, + pub fields_stripped: bool, +} + impl Clean for doctree::Struct { fn clean(&self, cx: &DocContext) -> Item { Item { @@ -1955,6 +1940,26 @@ impl Clean for doctree::Struct { } } +impl Clean for doctree::Union { + fn clean(&self, cx: &DocContext) -> Item { + Item { + name: Some(self.name.clean(cx)), + attrs: self.attrs.clean(cx), + source: self.whence.clean(cx), + def_id: cx.map.local_def_id(self.id), + visibility: self.vis.clean(cx), + stability: self.stab.clean(cx), + deprecation: self.depr.clean(cx), + inner: UnionItem(Union { + struct_type: self.struct_type, + generics: self.generics.clean(cx), + fields: self.fields.clean(cx), + fields_stripped: false, + }), + } + } +} + /// This is a more limited form of the standard Struct, different in that /// it lacks the things most items have (name, id, parameterization). Found /// only as a variant in an enum. 
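The `From<ast::IntTy>`, `From<ast::UintTy>` and `From<ast::FloatTy>` impls added a few hunks above let the long `ty::TyInt(..)` / `TyUint(..)` / `TyFloat(..)` match arms collapse into a single `Primitive(int_ty.into())` call at each site. A minimal standalone illustration of the same pattern, using hypothetical stand-in enums rather than the compiler's own types:

```rust
#[derive(Debug, Clone, Copy)]
enum IntTy { Is, I8, I16, I32, I64 }

#[derive(Debug, Clone, Copy)]
enum PrimitiveType { Isize, I8, I16, I32, I64 }

impl From<IntTy> for PrimitiveType {
    fn from(int_ty: IntTy) -> PrimitiveType {
        match int_ty {
            IntTy::Is => PrimitiveType::Isize,
            IntTy::I8 => PrimitiveType::I8,
            IntTy::I16 => PrimitiveType::I16,
            IntTy::I32 => PrimitiveType::I32,
            IntTy::I64 => PrimitiveType::I64,
        }
    }
}

// One `.into()` at the call site instead of five nearly identical match arms.
fn clean(int_ty: IntTy) -> PrimitiveType {
    int_ty.into()
}

fn main() {
    println!("{:?}", clean(IntTy::I32));
    println!("{:?}", clean(IntTy::Is));
}
```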
@@ -2359,7 +2364,7 @@ impl Clean for hir::ImplPolarity { pub struct Impl { pub unsafety: hir::Unsafety, pub generics: Generics, - pub provided_trait_methods: HashSet, + pub provided_trait_methods: FnvHashSet, pub trait_: Option, pub for_: Type, pub items: Vec, @@ -2385,7 +2390,7 @@ impl Clean> for doctree::Impl { .map(|meth| meth.name.to_string()) .collect() }) - }).unwrap_or(HashSet::new()); + }).unwrap_or(FnvHashSet()); ret.push(Item { name: None, @@ -2434,25 +2439,25 @@ fn build_deref_target_impls(cx: &DocContext, } }; let did = match primitive { - Isize => tcx.lang_items.isize_impl(), - I8 => tcx.lang_items.i8_impl(), - I16 => tcx.lang_items.i16_impl(), - I32 => tcx.lang_items.i32_impl(), - I64 => tcx.lang_items.i64_impl(), - Usize => tcx.lang_items.usize_impl(), - U8 => tcx.lang_items.u8_impl(), - U16 => tcx.lang_items.u16_impl(), - U32 => tcx.lang_items.u32_impl(), - U64 => tcx.lang_items.u64_impl(), - F32 => tcx.lang_items.f32_impl(), - F64 => tcx.lang_items.f64_impl(), - Char => tcx.lang_items.char_impl(), - Bool => None, - Str => tcx.lang_items.str_impl(), - Slice => tcx.lang_items.slice_impl(), - Array => tcx.lang_items.slice_impl(), - PrimitiveTuple => None, - PrimitiveRawPointer => tcx.lang_items.const_ptr_impl(), + PrimitiveType::Isize => tcx.lang_items.isize_impl(), + PrimitiveType::I8 => tcx.lang_items.i8_impl(), + PrimitiveType::I16 => tcx.lang_items.i16_impl(), + PrimitiveType::I32 => tcx.lang_items.i32_impl(), + PrimitiveType::I64 => tcx.lang_items.i64_impl(), + PrimitiveType::Usize => tcx.lang_items.usize_impl(), + PrimitiveType::U8 => tcx.lang_items.u8_impl(), + PrimitiveType::U16 => tcx.lang_items.u16_impl(), + PrimitiveType::U32 => tcx.lang_items.u32_impl(), + PrimitiveType::U64 => tcx.lang_items.u64_impl(), + PrimitiveType::F32 => tcx.lang_items.f32_impl(), + PrimitiveType::F64 => tcx.lang_items.f64_impl(), + PrimitiveType::Char => tcx.lang_items.char_impl(), + PrimitiveType::Bool => None, + PrimitiveType::Str => tcx.lang_items.str_impl(), + PrimitiveType::Slice => tcx.lang_items.slice_impl(), + PrimitiveType::Array => tcx.lang_items.slice_impl(), + PrimitiveType::Tuple => None, + PrimitiveType::RawPointer => tcx.lang_items.const_ptr_impl(), }; if let Some(did) = did { if !did.is_local() { @@ -2509,8 +2514,8 @@ impl Clean> for doctree::Import { // Don't inline doc(hidden) imports so they can be stripped at a later stage. let denied = self.vis != hir::Public || self.attrs.iter().any(|a| { &a.name()[..] 
== "doc" && match a.meta_item_list() { - Some(l) => attr::contains_name(l, "no_inline") || - attr::contains_name(l, "hidden"), + Some(l) => attr::list_contains_name(l, "no_inline") || + attr::list_contains_name(l, "hidden"), None => false, } }); @@ -2526,7 +2531,7 @@ impl Clean> for doctree::Import { let remaining = if !denied { let mut remaining = vec![]; for path in list { - match inline::try_inline(cx, path.node.id(), path.node.rename()) { + match inline::try_inline(cx, path.node.id, path.node.rename) { Some(items) => { ret.extend(items); } @@ -2559,7 +2564,7 @@ impl Clean> for doctree::Import { name: None, attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.map.local_def_id(0), + def_id: cx.map.local_def_id(ast::CRATE_NODE_ID), visibility: self.vis.clean(cx), stability: None, deprecation: None, @@ -2594,17 +2599,10 @@ pub struct ViewListIdent { impl Clean for hir::PathListItem { fn clean(&self, cx: &DocContext) -> ViewListIdent { - match self.node { - hir::PathListIdent { id, name, rename } => ViewListIdent { - name: name.clean(cx), - rename: rename.map(|r| r.clean(cx)), - source: resolve_def(cx, id) - }, - hir::PathListMod { id, rename } => ViewListIdent { - name: "self".to_string(), - rename: rename.map(|r| r.clean(cx)), - source: resolve_def(cx, id) - } + ViewListIdent { + name: self.node.name.clean(cx), + rename: self.node.rename.map(|r| r.clean(cx)), + source: resolve_def(cx, self.node.id) } } } @@ -2679,7 +2677,7 @@ fn name_from_pat(p: &hir::Pat) -> String { match p.node { PatKind::Wild => "_".to_string(), PatKind::Binding(_, ref p, _) => p.node.to_string(), - PatKind::TupleStruct(ref p, _, _) | PatKind::Path(None, ref p) => path_to_string(p), + PatKind::TupleStruct(ref p, ..) | PatKind::Path(None, ref p) => path_to_string(p), PatKind::Path(..) => panic!("tried to get argument name from qualified PatKind::Path, \ which is not allowed in function arguments"), PatKind::Struct(ref name, ref fields, etc) => { @@ -2735,21 +2733,12 @@ fn resolve_type(cx: &DocContext, let is_generic = match def { Def::PrimTy(p) => match p { - hir::TyStr => return Primitive(Str), - hir::TyBool => return Primitive(Bool), - hir::TyChar => return Primitive(Char), - hir::TyInt(ast::IntTy::Is) => return Primitive(Isize), - hir::TyInt(ast::IntTy::I8) => return Primitive(I8), - hir::TyInt(ast::IntTy::I16) => return Primitive(I16), - hir::TyInt(ast::IntTy::I32) => return Primitive(I32), - hir::TyInt(ast::IntTy::I64) => return Primitive(I64), - hir::TyUint(ast::UintTy::Us) => return Primitive(Usize), - hir::TyUint(ast::UintTy::U8) => return Primitive(U8), - hir::TyUint(ast::UintTy::U16) => return Primitive(U16), - hir::TyUint(ast::UintTy::U32) => return Primitive(U32), - hir::TyUint(ast::UintTy::U64) => return Primitive(U64), - hir::TyFloat(ast::FloatTy::F32) => return Primitive(F32), - hir::TyFloat(ast::FloatTy::F64) => return Primitive(F64), + hir::TyStr => return Primitive(PrimitiveType::Str), + hir::TyBool => return Primitive(PrimitiveType::Bool), + hir::TyChar => return Primitive(PrimitiveType::Char), + hir::TyInt(int_ty) => return Primitive(int_ty.into()), + hir::TyUint(uint_ty) => return Primitive(uint_ty.into()), + hir::TyFloat(float_ty) => return Primitive(float_ty.into()), }, Def::SelfTy(..) 
if path.segments.len() == 1 => { return Generic(keywords::SelfType.name().to_string()); @@ -2764,29 +2753,25 @@ fn resolve_type(cx: &DocContext, fn register_def(cx: &DocContext, def: Def) -> DefId { debug!("register_def({:?})", def); + let tcx = cx.tcx(); + let (did, kind) = match def { Def::Fn(i) => (i, TypeFunction), Def::TyAlias(i) => (i, TypeTypedef), Def::Enum(i) => (i, TypeEnum), Def::Trait(i) => (i, TypeTrait), Def::Struct(i) => (i, TypeStruct), + Def::Union(i) => (i, TypeUnion), Def::Mod(i) => (i, TypeModule), Def::Static(i, _) => (i, TypeStatic), - Def::Variant(i, _) => (i, TypeEnum), + Def::Variant(i) => (tcx.parent_def_id(i).unwrap(), TypeEnum), Def::SelfTy(Some(def_id), _) => (def_id, TypeTrait), - Def::SelfTy(_, Some(impl_id)) => { - // For Def::SelfTy() values inlined from another crate, the - // impl_id will be DUMMY_NODE_ID, which would cause problems. - // But we should never run into an impl from another crate here. - return cx.map.local_def_id(impl_id) + Def::SelfTy(_, Some(impl_def_id)) => { + return impl_def_id } _ => return def.def_id() }; if did.is_local() { return did } - let tcx = match cx.tcx_opt() { - Some(tcx) => tcx, - None => return did - }; inline::record_extern_fqn(cx, did, kind); if let TypeTrait = kind { let t = inline::build_external_trait(cx, tcx, did); @@ -2816,7 +2801,7 @@ pub struct Macro { impl Clean for doctree::Macro { fn clean(&self, cx: &DocContext) -> Item { - let name = format!("{}!", self.name.clean(cx)); + let name = self.name.clean(cx); Item { name: Some(name.clone()), attrs: self.attrs.clean(cx), @@ -2827,8 +2812,10 @@ impl Clean for doctree::Macro { def_id: cx.map.local_def_id(self.id), inner: MacroItem(Macro { source: format!("macro_rules! {} {{\n{}}}", - name.trim_right_matches('!'), self.matchers.iter().map(|span| - format!(" {} => {{ ... }};\n", span.to_src(cx))).collect::()), + name, + self.matchers.iter().map(|span| { + format!(" {} => {{ ... }};\n", span.to_src(cx)) + }).collect::()), imported_from: self.imported_from.clean(cx), }), } @@ -2920,7 +2907,7 @@ impl<'tcx> Clean for ty::AssociatedType<'tcx> { // applied to this associated type in question. 
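The reworked `Clean` impl for `doctree::Macro` above drops the trailing `!` from the stored item name and rebuilds the displayed source by emitting one `pattern => { ... };` line per matcher inside a `macro_rules!` shell. A small self-contained sketch of that string assembly (an illustrative helper, not rustdoc's actual function, with plain `&str` matchers standing in for spans):

```rust
fn render_macro_source(name: &str, matchers: &[&str]) -> String {
    // Mirrors the format string used in the patch: the macro name, then one
    // elided arm per matcher.
    format!("macro_rules! {} {{\n{}}}",
            name,
            matchers.iter()
                    .map(|m| format!("    {} => {{ ... }};\n", m))
                    .collect::<String>())
}

fn main() {
    let src = render_macro_source("vec", &["()", "($elem:expr; $n:expr)", "($($x:expr),*)"]);
    println!("{}", src);
}
```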
let def = cx.tcx().lookup_trait_def(did); let predicates = cx.tcx().lookup_predicates(did); - let generics = (&def.generics, &predicates, subst::TypeSpace).clean(cx); + let generics = (def.generics, &predicates).clean(cx); generics.where_predicates.iter().filter_map(|pred| { let (name, self_type, trait_, bounds) = match *pred { WherePredicate::BoundPredicate { @@ -2967,17 +2954,6 @@ impl<'tcx> Clean for ty::AssociatedType<'tcx> { } } -impl<'a> Clean for (ty::TypeScheme<'a>, ty::GenericPredicates<'a>, - ParamSpace) { - fn clean(&self, cx: &DocContext) -> Typedef { - let (ref ty_scheme, ref predicates, ps) = *self; - Typedef { - type_: ty_scheme.ty.clean(cx), - generics: (&ty_scheme.generics, predicates, ps).clean(cx) - } - } -} - fn lang_struct(cx: &DocContext, did: Option, t: ty::Ty, name: &str, fallback: fn(Box) -> Type) -> Type { diff --git a/src/librustdoc/clean/simplify.rs b/src/librustdoc/clean/simplify.rs index c0faa04323..7ae1774390 100644 --- a/src/librustdoc/clean/simplify.rs +++ b/src/librustdoc/clean/simplify.rs @@ -30,11 +30,11 @@ use std::mem; use std::collections::BTreeMap; use rustc::hir::def_id::DefId; -use rustc::ty::subst; +use rustc::ty; use clean::PathParameters as PP; use clean::WherePredicate as WP; -use clean::{self, Clean}; +use clean; use core::DocContext; pub fn where_clauses(cx: &DocContext, clauses: Vec) -> Vec { @@ -153,27 +153,16 @@ fn trait_is_same_or_supertrait(cx: &DocContext, child: DefId, if child == trait_ { return true } - let def = cx.tcx().lookup_trait_def(child); - let predicates = cx.tcx().lookup_predicates(child); - let generics = (&def.generics, &predicates, subst::TypeSpace).clean(cx); - generics.where_predicates.iter().filter_map(|pred| { - match *pred { - clean::WherePredicate::BoundPredicate { - ty: clean::Generic(ref s), - ref bounds - } if *s == "Self" => Some(bounds), - _ => None, - } - }).flat_map(|bounds| bounds).any(|bound| { - let poly_trait = match *bound { - clean::TraitBound(ref t, _) => t, - _ => return false, - }; - match poly_trait.trait_ { - clean::ResolvedPath { did, .. 
} => { - trait_is_same_or_supertrait(cx, did, trait_) + let predicates = cx.tcx().lookup_super_predicates(child).predicates; + predicates.iter().filter_map(|pred| { + if let ty::Predicate::Trait(ref pred) = *pred { + if pred.0.trait_ref.self_ty().is_self() { + Some(pred.def_id()) + } else { + None } - _ => false, + } else { + None } - }) + }).any(|did| trait_is_same_or_supertrait(cx, did, trait_)) } diff --git a/src/librustdoc/core.rs b/src/librustdoc/core.rs index 10736d2c82..49e467e5cb 100644 --- a/src/librustdoc/core.rs +++ b/src/librustdoc/core.rs @@ -14,10 +14,12 @@ use rustc_driver::{driver, target_features, abort_on_err}; use rustc::dep_graph::DepGraph; use rustc::session::{self, config}; use rustc::hir::def_id::DefId; +use rustc::hir::def::Def; use rustc::middle::privacy::AccessLevels; use rustc::ty::{self, TyCtxt}; use rustc::hir::map as hir_map; use rustc::lint; +use rustc::util::nodemap::FnvHashMap; use rustc_trans::back::link; use rustc_resolve as resolve; use rustc_metadata::cstore::CStore; @@ -28,8 +30,9 @@ use errors; use errors::emitter::ColorConfig; use std::cell::{RefCell, Cell}; -use std::collections::{HashMap, HashSet}; +use std::mem; use std::rc::Rc; +use std::path::PathBuf; use visit_ast::RustdocVisitor; use clean; @@ -45,14 +48,15 @@ pub enum MaybeTyped<'a, 'tcx: 'a> { NotTyped(&'a session::Session) } -pub type ExternalPaths = HashMap, clean::TypeKind)>; +pub type ExternalPaths = FnvHashMap, clean::TypeKind)>; pub struct DocContext<'a, 'tcx: 'a> { pub map: &'a hir_map::Map<'tcx>, pub maybe_typed: MaybeTyped<'a, 'tcx>, pub input: Input, - pub populated_crate_impls: RefCell>, + pub populated_all_crate_impls: Cell, pub deref_trait_did: Cell>, + pub deref_mut_trait_did: Cell>, // Note that external items for which `doc(hidden)` applies to are shown as // non-reachable while local items aren't. This is because we're reusing // the access levels from crateanalysis. @@ -61,7 +65,15 @@ pub struct DocContext<'a, 'tcx: 'a> { /// Later on moved into `html::render::CACHE_KEY` pub renderinfo: RefCell, /// Later on moved through `clean::Crate` into `html::render::CACHE_KEY` - pub external_traits: RefCell>, + pub external_traits: RefCell>, + + // The current set of type and lifetime substitutions, + // for expanding type aliases at the HIR level: + + /// Table type parameter definition -> substituted type + pub ty_substs: RefCell>, + /// Table node id of lifetime parameter definition -> substituted lifetime + pub lt_substs: RefCell>, } impl<'b, 'tcx> DocContext<'b, 'tcx> { @@ -83,6 +95,22 @@ impl<'b, 'tcx> DocContext<'b, 'tcx> { let tcx_opt = self.tcx_opt(); tcx_opt.expect("tcx not present") } + + /// Call the closure with the given parameters set as + /// the substitutions for a type alias' RHS. + pub fn enter_alias(&self, + ty_substs: FnvHashMap, + lt_substs: FnvHashMap, + f: F) -> R + where F: FnOnce() -> R { + let (old_tys, old_lts) = + (mem::replace(&mut *self.ty_substs.borrow_mut(), ty_substs), + mem::replace(&mut *self.lt_substs.borrow_mut(), lt_substs)); + let r = f(); + *self.ty_substs.borrow_mut() = old_tys; + *self.lt_substs.borrow_mut() = old_lts; + r + } } pub trait DocAccessLevels { @@ -100,7 +128,8 @@ pub fn run_core(search_paths: SearchPaths, cfgs: Vec, externs: config::Externs, input: Input, - triple: Option) -> (clean::Crate, RenderInfo) + triple: Option, + maybe_sysroot: Option) -> (clean::Crate, RenderInfo) { // Parse, resolve, and typecheck the given crate. 
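The new `enter_alias` helper shown above swaps the type and lifetime substitution tables in for the duration of a closure and restores the previous tables afterwards, which is what lets `hir::Ty::clean` expand private type aliases without threading the maps through every call. A minimal sketch of the same save-and-restore pattern, using a hypothetical context type and only a type-substitution table:

```rust
use std::cell::RefCell;
use std::collections::HashMap;
use std::mem;

struct Context {
    ty_substs: RefCell<HashMap<String, String>>,
}

impl Context {
    fn enter_alias<F, R>(&self, ty_substs: HashMap<String, String>, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        // Swap the new substitutions in, remembering the old table.
        let old = mem::replace(&mut *self.ty_substs.borrow_mut(), ty_substs);
        let r = f();
        // Restore the previous substitutions before returning.
        *self.ty_substs.borrow_mut() = old;
        r
    }
}

fn main() {
    let cx = Context { ty_substs: RefCell::new(HashMap::new()) };
    let substs: HashMap<_, _> =
        vec![("T".to_string(), "u32".to_string())].into_iter().collect();
    let resolved = cx.enter_alias(substs, || {
        cx.ty_substs.borrow().get("T").cloned().unwrap_or_else(|| "T".to_string())
    });
    assert!(cx.ty_substs.borrow().is_empty()); // the old (empty) table is back
    println!("T resolves to {}", resolved);
}
```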
@@ -112,7 +141,7 @@ pub fn run_core(search_paths: SearchPaths, let warning_lint = lint::builtin::WARNINGS.name_lower(); let sessopts = config::Options { - maybe_sysroot: None, + maybe_sysroot: maybe_sysroot, search_paths: search_paths, crate_types: vec!(config::CrateTypeRlib), lint_opts: vec!((warning_lint, lint::Allow)), @@ -146,7 +175,7 @@ pub fn run_core(search_paths: SearchPaths, let driver::ExpansionResult { defs, analysis, resolutions, mut hir_forest, .. } = { driver::phase_2_configure_and_expand( - &sess, &cstore, krate, &name, None, resolve::MakeGlobMap::No, |_| Ok(()), + &sess, &cstore, krate, None, &name, None, resolve::MakeGlobMap::No, |_| Ok(()), ).expect("phase_2_configure_and_expand aborted in rustdoc!") }; @@ -159,7 +188,7 @@ pub fn run_core(search_paths: SearchPaths, resolutions, &arenas, &name, - |tcx, _, analysis, result| { + |tcx, _, analysis, _, result| { if let Err(_) = result { sess.fatal("Compilation failed, aborting rustdoc"); } @@ -178,11 +207,14 @@ pub fn run_core(search_paths: SearchPaths, map: &tcx.map, maybe_typed: Typed(tcx), input: input, - populated_crate_impls: RefCell::new(HashSet::new()), + populated_all_crate_impls: Cell::new(false), deref_trait_did: Cell::new(None), + deref_mut_trait_did: Cell::new(None), access_levels: RefCell::new(access_levels), - external_traits: RefCell::new(HashMap::new()), - renderinfo: RefCell::new(Default::default()), + external_traits: Default::default(), + renderinfo: Default::default(), + ty_substs: Default::default(), + lt_substs: Default::default(), }; debug!("crate: {:?}", ctxt.map.krate()); diff --git a/src/librustdoc/doctree.rs b/src/librustdoc/doctree.rs index 04d176c36c..609ae0c0e6 100644 --- a/src/librustdoc/doctree.rs +++ b/src/librustdoc/doctree.rs @@ -21,6 +21,7 @@ use syntax::ptr::P; use syntax_pos::{self, Span}; use rustc::hir; +use rustc::hir::def_id::CrateNum; pub struct Module { pub name: Option, @@ -30,6 +31,7 @@ pub struct Module { pub extern_crates: Vec, pub imports: Vec, pub structs: Vec, + pub unions: Vec, pub enums: Vec, pub fns: Vec, pub mods: Vec, @@ -52,7 +54,7 @@ impl Module { pub fn new(name: Option) -> Module { Module { name : name, - id: 0, + id: ast::CRATE_NODE_ID, vis: hir::Inherited, stab: None, depr: None, @@ -62,6 +64,7 @@ impl Module { extern_crates: Vec::new(), imports : Vec::new(), structs : Vec::new(), + unions : Vec::new(), enums : Vec::new(), fns : Vec::new(), mods : Vec::new(), @@ -80,14 +83,12 @@ impl Module { #[derive(Debug, Clone, RustcEncodable, RustcDecodable, Copy)] pub enum StructType { - /// A normal struct + /// A braced struct Plain, /// A tuple struct Tuple, - /// A newtype struct (tuple struct with one element) - Newtype, /// A unit struct - Unit + Unit, } pub enum TypeBound { @@ -108,6 +109,19 @@ pub struct Struct { pub whence: Span, } +pub struct Union { + pub vis: hir::Visibility, + pub stab: Option, + pub depr: Option, + pub id: NodeId, + pub struct_type: StructType, + pub name: Name, + pub generics: hir::Generics, + pub attrs: hir::HirVec, + pub fields: hir::HirVec, + pub whence: Span, +} + pub struct Enum { pub vis: hir::Visibility, pub stab: Option, @@ -232,7 +246,7 @@ pub struct Macro { pub struct ExternCrate { pub name: Name, - pub cnum: ast::CrateNum, + pub cnum: CrateNum, pub path: Option, pub vis: hir::Visibility, pub attrs: hir::HirVec, @@ -247,15 +261,10 @@ pub struct Import { pub whence: Span, } -pub fn struct_type_from_def(sd: &hir::VariantData) -> StructType { - if !sd.is_struct() { - // We are in a tuple-struct - match sd.fields().len() { - 0 => Unit, - 1 
=> Newtype, - _ => Tuple - } - } else { - Plain +pub fn struct_type_from_def(vdata: &hir::VariantData) -> StructType { + match *vdata { + hir::VariantData::Struct(..) => Plain, + hir::VariantData::Tuple(..) => Tuple, + hir::VariantData::Unit(..) => Unit, } } diff --git a/src/librustdoc/fold.rs b/src/librustdoc/fold.rs index 5595c74925..8d6ab221c4 100644 --- a/src/librustdoc/fold.rs +++ b/src/librustdoc/fold.rs @@ -49,6 +49,13 @@ pub trait DocFolder : Sized { i.fields.iter().any(|f| f.is_stripped()); StructItem(i) }, + UnionItem(mut i) => { + let num_fields = i.fields.len(); + i.fields = i.fields.into_iter().filter_map(|x| self.fold_item(x)).collect(); + i.fields_stripped |= num_fields != i.fields.len() || + i.fields.iter().any(|f| f.is_stripped()); + UnionItem(i) + }, EnumItem(mut i) => { let num_variants = i.variants.len(); i.variants = i.variants.into_iter().filter_map(|x| self.fold_item(x)).collect(); diff --git a/src/librustdoc/html/format.rs b/src/librustdoc/html/format.rs index 2f03b235e9..adcdc7aaab 100644 --- a/src/librustdoc/html/format.rs +++ b/src/librustdoc/html/format.rs @@ -18,12 +18,11 @@ use std::fmt; use std::iter::repeat; -use rustc::middle::cstore::LOCAL_CRATE; -use rustc::hir::def_id::DefId; +use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use syntax::abi::Abi; use rustc::hir; -use clean; +use clean::{self, PrimitiveType}; use core::DocAccessLevels; use html::item_type::ItemType; use html::escape::Escape; @@ -326,7 +325,7 @@ pub fn href(did: DefId) -> Option<(String, ItemType, Vec)> { url.push_str("/index.html"); } _ => { - url.push_str(shortty.to_static_str()); + url.push_str(shortty.css_class()); url.push_str("."); url.push_str(fqp.last().unwrap()); url.push_str(".html"); @@ -468,39 +467,39 @@ impl fmt::Display for clean::Type { } clean::Tuple(ref typs) => { match &typs[..] 
{ - &[] => primitive_link(f, clean::PrimitiveTuple, "()"), + &[] => primitive_link(f, PrimitiveType::Tuple, "()"), &[ref one] => { - primitive_link(f, clean::PrimitiveTuple, "(")?; + primitive_link(f, PrimitiveType::Tuple, "(")?; write!(f, "{},", one)?; - primitive_link(f, clean::PrimitiveTuple, ")") + primitive_link(f, PrimitiveType::Tuple, ")") } many => { - primitive_link(f, clean::PrimitiveTuple, "(")?; + primitive_link(f, PrimitiveType::Tuple, "(")?; write!(f, "{}", CommaSep(&many))?; - primitive_link(f, clean::PrimitiveTuple, ")") + primitive_link(f, PrimitiveType::Tuple, ")") } } } clean::Vector(ref t) => { - primitive_link(f, clean::Slice, &format!("["))?; + primitive_link(f, PrimitiveType::Slice, &format!("["))?; write!(f, "{}", t)?; - primitive_link(f, clean::Slice, &format!("]")) + primitive_link(f, PrimitiveType::Slice, &format!("]")) } clean::FixedVector(ref t, ref s) => { - primitive_link(f, clean::PrimitiveType::Array, "[")?; + primitive_link(f, PrimitiveType::Array, "[")?; write!(f, "{}", t)?; - primitive_link(f, clean::PrimitiveType::Array, + primitive_link(f, PrimitiveType::Array, &format!("; {}]", Escape(s))) } clean::Never => f.write_str("!"), clean::RawPointer(m, ref t) => { match **t { clean::Generic(_) | clean::ResolvedPath {is_generic: true, ..} => { - primitive_link(f, clean::PrimitiveType::PrimitiveRawPointer, + primitive_link(f, clean::PrimitiveType::RawPointer, &format!("*{}{}", RawMutableSpace(m), t)) } _ => { - primitive_link(f, clean::PrimitiveType::PrimitiveRawPointer, + primitive_link(f, clean::PrimitiveType::RawPointer, &format!("*{}", RawMutableSpace(m)))?; write!(f, "{}", t) } @@ -516,12 +515,13 @@ impl fmt::Display for clean::Type { clean::Vector(ref bt) => { // BorrowedRef{ ... Vector(T) } is &[T] match **bt { clean::Generic(_) => - primitive_link(f, clean::Slice, + primitive_link(f, PrimitiveType::Slice, &format!("&{}{}[{}]", lt, m, **bt)), _ => { - primitive_link(f, clean::Slice, &format!("&{}{}[", lt, m))?; + primitive_link(f, PrimitiveType::Slice, + &format!("&{}{}[", lt, m))?; write!(f, "{}", **bt)?; - primitive_link(f, clean::Slice, "]") + primitive_link(f, PrimitiveType::Slice, "]") } } } diff --git a/src/librustdoc/html/highlight.rs b/src/librustdoc/html/highlight.rs index 6cb79d6e86..881352cb73 100644 --- a/src/librustdoc/html/highlight.rs +++ b/src/librustdoc/html/highlight.rs @@ -17,7 +17,7 @@ //! the `render_inner_with_highlighting` or `render_with_highlighting` //! functions. For more advanced use cases (if you want to supply your own css //! classes or control how the HTML is generated, or even generate something -//! other then HTML), then you should implement the the `Writer` trait and use a +//! other then HTML), then you should implement the `Writer` trait and use a //! `Classifier`. use html::escape::Escape; @@ -33,7 +33,8 @@ use syntax::parse; use syntax_pos::Span; /// Highlights `src`, returning the HTML output. -pub fn render_with_highlighting(src: &str, class: Option<&str>, id: Option<&str>) -> String { +pub fn render_with_highlighting(src: &str, class: Option<&str>, id: Option<&str>, + extension: Option<&str>) -> String { debug!("highlighting: ================\n{}\n==============", src); let sess = parse::ParseSess::new(); let fm = sess.codemap().new_filemap("".to_string(), None, src.to_string()); @@ -47,6 +48,9 @@ pub fn render_with_highlighting(src: &str, class: Option<&str>, id: Option<&str> return format!("

<pre>{}</pre>
", src); } + if let Some(extension) = extension { + write!(out, "{}", extension).unwrap(); + } write_footer(&mut out).unwrap(); String::from_utf8_lossy(&out[..]).into_owned() } diff --git a/src/librustdoc/html/item_type.rs b/src/librustdoc/html/item_type.rs index 74f7b09904..b93dc17dbd 100644 --- a/src/librustdoc/html/item_type.rs +++ b/src/librustdoc/html/item_type.rs @@ -40,10 +40,19 @@ pub enum ItemType { AssociatedType = 16, Constant = 17, AssociatedConst = 18, + Union = 19, } -impl ItemType { - pub fn from_item(item: &clean::Item) -> ItemType { + +#[derive(Copy, Eq, PartialEq, Clone)] +pub enum NameSpace { + Type, + Value, + Macro, +} + +impl<'a> From<&'a clean::Item> for ItemType { + fn from(item: &'a clean::Item) -> ItemType { let inner = match item.inner { clean::StrippedItem(box ref item) => item, ref inner@_ => inner, @@ -54,6 +63,7 @@ impl ItemType { clean::ExternCrateItem(..) => ItemType::ExternCrate, clean::ImportItem(..) => ItemType::Import, clean::StructItem(..) => ItemType::Struct, + clean::UnionItem(..) => ItemType::Union, clean::EnumItem(..) => ItemType::Enum, clean::FunctionItem(..) => ItemType::Function, clean::TypedefItem(..) => ItemType::Typedef, @@ -75,10 +85,13 @@ impl ItemType { clean::StrippedItem(..) => unreachable!(), } } +} - pub fn from_type_kind(kind: clean::TypeKind) -> ItemType { +impl From for ItemType { + fn from(kind: clean::TypeKind) -> ItemType { match kind { clean::TypeStruct => ItemType::Struct, + clean::TypeUnion => ItemType::Union, clean::TypeEnum => ItemType::Enum, clean::TypeFunction => ItemType::Function, clean::TypeTrait => ItemType::Trait, @@ -89,13 +102,16 @@ impl ItemType { clean::TypeTypedef => ItemType::Typedef, } } +} - pub fn to_static_str(&self) -> &'static str { +impl ItemType { + pub fn css_class(&self) -> &'static str { match *self { ItemType::Module => "mod", ItemType::ExternCrate => "externcrate", ItemType::Import => "import", ItemType::Struct => "struct", + ItemType::Union => "union", ItemType::Enum => "enum", ItemType::Function => "fn", ItemType::Typedef => "type", @@ -113,9 +129,56 @@ impl ItemType { ItemType::AssociatedConst => "associatedconstant", } } + + pub fn name_space(&self) -> NameSpace { + match *self { + ItemType::Struct | + ItemType::Union | + ItemType::Enum | + ItemType::Module | + ItemType::Typedef | + ItemType::Trait | + ItemType::Primitive | + ItemType::AssociatedType => NameSpace::Type, + + ItemType::ExternCrate | + ItemType::Import | + ItemType::Function | + ItemType::Static | + ItemType::Impl | + ItemType::TyMethod | + ItemType::Method | + ItemType::StructField | + ItemType::Variant | + ItemType::Constant | + ItemType::AssociatedConst => NameSpace::Value, + + ItemType::Macro => NameSpace::Macro, + } + } } impl fmt::Display for ItemType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.css_class().fmt(f) + } +} + +pub const NAMESPACE_TYPE: &'static str = "t"; +pub const NAMESPACE_VALUE: &'static str = "v"; +pub const NAMESPACE_MACRO: &'static str = "m"; + +impl NameSpace { + pub fn to_static_str(&self) -> &'static str { + match *self { + NameSpace::Type => NAMESPACE_TYPE, + NameSpace::Value => NAMESPACE_VALUE, + NameSpace::Macro => NAMESPACE_MACRO, + } + } +} + +impl fmt::Display for NameSpace { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.to_static_str().fmt(f) } diff --git a/src/librustdoc/html/layout.rs b/src/librustdoc/html/layout.rs index 265ed6be15..151e138efe 100644 --- a/src/librustdoc/html/layout.rs +++ b/src/librustdoc/html/layout.rs @@ -24,7 +24,7 @@ pub struct Layout { 
pub struct Page<'a> { pub title: &'a str, - pub ty: &'a str, + pub css_class: &'a str, pub root_path: &'a str, pub description: &'a str, pub keywords: &'a str, @@ -80,7 +80,7 @@ r##" -
{content}
+
{content}
@@ -152,7 +152,7 @@ r##" }, content = *t, root_path = page.root_path, - ty = page.ty, + css_class = page.css_class, logo = if layout.logo.is_empty() { "".to_string() } else { diff --git a/src/librustdoc/html/markdown.rs b/src/librustdoc/html/markdown.rs index 139e103317..e9a1f650c9 100644 --- a/src/librustdoc/html/markdown.rs +++ b/src/librustdoc/html/markdown.rs @@ -27,7 +27,6 @@ #![allow(non_camel_case_types)] use libc; -use rustc::session::config::get_unstable_features_setting; use std::ascii::AsciiExt; use std::cell::RefCell; use std::default::Default; @@ -262,9 +261,11 @@ pub fn render(w: &mut fmt::Formatter, s: &str, print_toc: bool) -> fmt::Result { &Default::default()); s.push_str(&format!("{}", Escape(&test))); }); - s.push_str(&highlight::render_with_highlighting(&text, - Some("rust-example-rendered"), - None)); + s.push_str(&highlight::render_with_highlighting( + &text, + Some("rust-example-rendered"), + None, + Some("
Run"))); let output = CString::new(s).unwrap(); hoedown_buffer_puts(ob, output.as_ptr()); }) @@ -476,13 +477,10 @@ impl LangString { let mut data = LangString::all_false(); let mut allow_compile_fail = false; let mut allow_error_code_check = false; - match get_unstable_features_setting() { - UnstableFeatures::Allow | UnstableFeatures::Cheat => { - allow_compile_fail = true; - allow_error_code_check = true; - } - _ => {}, - }; + if UnstableFeatures::from_environment().is_nightly_build() { + allow_compile_fail = true; + allow_error_code_check = true; + } let tokens = string.split(|c: char| !(c == '_' || c == '-' || c.is_alphanumeric()) diff --git a/src/librustdoc/html/render.rs b/src/librustdoc/html/render.rs index e4e886c853..9c80f6e98c 100644 --- a/src/librustdoc/html/render.rs +++ b/src/librustdoc/html/render.rs @@ -37,11 +37,11 @@ pub use self::ExternalLocation::*; use std::ascii::AsciiExt; use std::cell::RefCell; use std::cmp::Ordering; -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::BTreeMap; use std::default::Default; use std::error; use std::fmt::{self, Display, Formatter}; -use std::fs::{self, File}; +use std::fs::{self, File, OpenOptions}; use std::io::prelude::*; use std::io::{self, BufWriter, BufReader}; use std::iter::repeat; @@ -53,16 +53,16 @@ use std::sync::Arc; use externalfiles::ExternalHtml; use serialize::json::{ToJson, Json, as_json}; -use syntax::{abi, ast}; +use syntax::abi; use syntax::feature_gate::UnstableFeatures; -use rustc::middle::cstore::LOCAL_CRATE; -use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId}; +use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId, LOCAL_CRATE}; use rustc::middle::privacy::AccessLevels; use rustc::middle::stability; -use rustc::session::config::get_unstable_features_setting; use rustc::hir; +use rustc::util::nodemap::{FnvHashMap, FnvHashSet}; +use rustc_data_structures::flock; -use clean::{self, Attributes, GetDefId}; +use clean::{self, Attributes, GetDefId, SelfTy, Mutability}; use doctree; use fold::DocFolder; use html::escape::Escape; @@ -114,9 +114,9 @@ pub struct SharedContext { /// `true`. pub include_sources: bool, /// The local file sources we've emitted and their respective url-paths. - pub local_sources: HashMap, + pub local_sources: FnvHashMap, /// All the passes that were run on this crate. - pub passes: HashSet, + pub passes: FnvHashSet, /// The base-URL of the issue tracker for when an item has been tagged with /// an issue number. pub issue_tracker_base_url: Option, @@ -211,7 +211,7 @@ pub struct Cache { /// Mapping of typaram ids to the name of the type parameter. This is used /// when pretty-printing a type (so pretty printing doesn't have to /// painfully maintain a context like this) - pub typarams: HashMap, + pub typarams: FnvHashMap, /// Maps a type id to all known implementations for that type. This is only /// recognized for intra-crate `ResolvedPath` types, and is used to print @@ -219,35 +219,35 @@ pub struct Cache { /// /// The values of the map are a list of implementations and documentation /// found on that implementation. - pub impls: HashMap>, + pub impls: FnvHashMap>, /// Maintains a mapping of local crate node ids to the fully qualified name /// and "short type description" of that node. This is used when generating /// URLs when a type is being linked to. External paths are not located in /// this map because the `External` type itself has all the information /// necessary. 
- pub paths: HashMap, ItemType)>, + pub paths: FnvHashMap, ItemType)>, /// Similar to `paths`, but only holds external paths. This is only used for /// generating explicit hyperlinks to other crates. - pub external_paths: HashMap, ItemType)>, + pub external_paths: FnvHashMap, ItemType)>, /// This map contains information about all known traits of this crate. /// Implementations of a crate should inherit the documentation of the /// parent trait if no extra documentation is specified, and default methods /// should show up in documentation about trait implementations. - pub traits: HashMap, + pub traits: FnvHashMap, /// When rendering traits, it's often useful to be able to list all /// implementors of the trait, and this mapping is exactly, that: a mapping /// of trait ids to the list of known implementors of the trait - pub implementors: HashMap>, + pub implementors: FnvHashMap>, /// Cache of where external crate documentation can be found. - pub extern_locations: HashMap, + pub extern_locations: FnvHashMap, /// Cache of where documentation for primitives can be found. - pub primitive_locations: HashMap, + pub primitive_locations: FnvHashMap, // Note that external items for which `doc(hidden)` applies to are shown as // non-reachable while local items aren't. This is because we're reusing @@ -260,27 +260,29 @@ pub struct Cache { parent_stack: Vec, parent_is_trait_impl: bool, search_index: Vec, - seen_modules: HashSet, + seen_modules: FnvHashSet, seen_mod: bool, stripped_mod: bool, deref_trait_did: Option, + deref_mut_trait_did: Option, // In rare case where a structure is defined in one module but implemented // in another, if the implementing module is parsed before defining module, // then the fully qualified name of the structure isn't presented in `paths` // yet when its implementation methods are being indexed. Caches such methods // and their parent id here and indexes them at the end of crate parsing. - orphan_methods: Vec<(DefId, clean::Item)>, + orphan_impl_items: Vec<(DefId, clean::Item)>, } /// Temporary storage for data obtained during `RustdocVisitor::clean()`. /// Later on moved into `CACHE_KEY`. 
#[derive(Default)] pub struct RenderInfo { - pub inlined: HashSet, + pub inlined: FnvHashSet, pub external_paths: ::core::ExternalPaths, - pub external_typarams: HashMap, + pub external_typarams: FnvHashMap, pub deref_trait_did: Option, + pub deref_mut_trait_did: Option, } /// Helper struct to render all source code to HTML pages @@ -377,10 +379,10 @@ impl ToJson for IndexItemFunctionType { thread_local!(static CACHE_KEY: RefCell> = Default::default()); thread_local!(pub static CURRENT_LOCATION_KEY: RefCell> = RefCell::new(Vec::new())); -thread_local!(static USED_ID_MAP: RefCell> = +thread_local!(static USED_ID_MAP: RefCell> = RefCell::new(init_ids())); -fn init_ids() -> HashMap { +fn init_ids() -> FnvHashMap { [ "main", "search", @@ -407,7 +409,7 @@ pub fn reset_ids(embedded: bool) { *s.borrow_mut() = if embedded { init_ids() } else { - HashMap::new() + FnvHashMap() }; }); } @@ -432,7 +434,7 @@ pub fn derive_id(candidate: String) -> String { pub fn run(mut krate: clean::Crate, external_html: &ExternalHtml, dst: PathBuf, - passes: HashSet, + passes: FnvHashSet, css_file_extension: Option, renderinfo: RenderInfo) -> Result<(), Error> { let src_root = match krate.src.parent() { @@ -443,7 +445,7 @@ pub fn run(mut krate: clean::Crate, src_root: src_root, passes: passes, include_sources: true, - local_sources: HashMap::new(), + local_sources: FnvHashMap(), issue_tracker_base_url: None, layout: layout::Layout { logo: "".to_string(), @@ -506,30 +508,32 @@ pub fn run(mut krate: clean::Crate, external_paths, external_typarams, deref_trait_did, + deref_mut_trait_did, } = renderinfo; let external_paths = external_paths.into_iter() - .map(|(k, (v, t))| (k, (v, ItemType::from_type_kind(t)))) + .map(|(k, (v, t))| (k, (v, ItemType::from(t)))) .collect(); let mut cache = Cache { - impls: HashMap::new(), + impls: FnvHashMap(), external_paths: external_paths, - paths: HashMap::new(), - implementors: HashMap::new(), + paths: FnvHashMap(), + implementors: FnvHashMap(), stack: Vec::new(), parent_stack: Vec::new(), search_index: Vec::new(), parent_is_trait_impl: false, - extern_locations: HashMap::new(), - primitive_locations: HashMap::new(), - seen_modules: HashSet::new(), + extern_locations: FnvHashMap(), + primitive_locations: FnvHashMap(), + seen_modules: FnvHashSet(), seen_mod: false, stripped_mod: false, access_levels: krate.access_levels.clone(), - orphan_methods: Vec::new(), - traits: mem::replace(&mut krate.external_traits, HashMap::new()), + orphan_impl_items: Vec::new(), + traits: mem::replace(&mut krate.external_traits, FnvHashMap()), deref_trait_did: deref_trait_did, + deref_mut_trait_did: deref_mut_trait_did, typarams: external_typarams, }; @@ -574,20 +578,20 @@ pub fn run(mut krate: clean::Crate, /// Build the search index from the collected metadata fn build_index(krate: &clean::Crate, cache: &mut Cache) -> String { - let mut nodeid_to_pathid = HashMap::new(); + let mut nodeid_to_pathid = FnvHashMap(); let mut crate_items = Vec::with_capacity(cache.search_index.len()); let mut crate_paths = Vec::::new(); let Cache { ref mut search_index, - ref orphan_methods, + ref orphan_impl_items, ref mut paths, .. } = *cache; - // Attach all orphan methods to the type's definition if the type + // Attach all orphan items to the type's definition if the type // has since been learned. 
- for &(did, ref item) in orphan_methods { + for &(did, ref item) in orphan_impl_items { if let Some(&(ref fqp, _)) = paths.get(&did) { search_index.push(IndexItem { - ty: shortty(item), + ty: item_type(item), name: item.name.clone().unwrap(), path: fqp[..fqp.len() - 1].join("::"), desc: Escape(&shorter(item.doc_value())).to_string(), @@ -650,7 +654,7 @@ fn write_shared(cx: &Context, // docs placed in the output directory, so this needs to be a synchronized // operation with respect to all other rustdocs running around. try_err!(mkdir(&cx.dst), &cx.dst); - let _lock = ::flock::Lock::new(&cx.dst.join(".lock")); + let _lock = flock::Lock::panicking_new(&cx.dst.join(".lock"), true, true, true); // Add all the static files. These may already exist, but we just // overwrite them anyway to make sure that they're fresh and up-to-date. @@ -714,10 +718,10 @@ fn write_shared(cx: &Context, for line in BufReader::new(File::open(path)?).lines() { let line = line?; if !line.starts_with(key) { - continue + continue; } if line.starts_with(&format!(r#"{}["{}"]"#, key, krate)) { - continue + continue; } ret.push(line.to_string()); } @@ -761,7 +765,7 @@ fn write_shared(cx: &Context, try_err!(mkdir(&mydst), &mydst); } mydst.push(&format!("{}.{}.js", - remote_item_type.to_static_str(), + remote_item_type.css_class(), remote_path[remote_path.len() - 1])); let all_implementors = try_err!(collect(&mydst, &krate.name, "implementors"), @@ -832,8 +836,8 @@ fn mkdir(path: &Path) -> io::Result<()> { } /// Returns a documentation-level item type from the item. -fn shortty(item: &clean::Item) -> ItemType { - ItemType::from_item(item) +fn item_type(item: &clean::Item) -> ItemType { + ItemType::from(item) } /// Takes a path to a source file and cleans the path to it. This canonicalizes @@ -952,7 +956,7 @@ impl<'a> SourceCollector<'a> { let mut fname = p.file_name().expect("source has no filename") .to_os_string(); fname.push(".html"); - cur.push(&fname[..]); + cur.push(&fname); href.push_str(&fname.to_string_lossy()); let mut w = BufWriter::new(File::create(&cur)?); @@ -961,7 +965,7 @@ impl<'a> SourceCollector<'a> { let desc = format!("Source to the Rust file `{}`.", filename); let page = layout::Page { title: &title, - ty: "source", + css_class: "source", root_path: &root_path, description: &desc, keywords: BASIC_KEYWORDS, @@ -997,17 +1001,8 @@ impl DocFolder for Cache { // Register any generics to their corresponding string. This is used // when pretty-printing types - match item.inner { - clean::StructItem(ref s) => self.generics(&s.generics), - clean::EnumItem(ref e) => self.generics(&e.generics), - clean::FunctionItem(ref f) => self.generics(&f.generics), - clean::TypedefItem(ref t, _) => self.generics(&t.generics), - clean::TraitItem(ref t) => self.generics(&t.generics), - clean::ImplItem(ref i) => self.generics(&i.generics), - clean::TyMethodItem(ref i) => self.generics(&i.generics), - clean::MethodItem(ref i) => self.generics(&i.generics), - clean::ForeignFunctionItem(ref f) => self.generics(&f.generics), - _ => {} + if let Some(generics) = item.inner.generics() { + self.generics(generics); } if !self.seen_mod { @@ -1031,7 +1026,7 @@ impl DocFolder for Cache { // Index this method for searching later on if let Some(ref s) = item.name { - let (parent, is_method) = match item.inner { + let (parent, is_inherent_impl_item) = match item.inner { clean::StrippedItem(..) => ((None, None), false), clean::AssociatedConstItem(..) 
| clean::TypedefItem(_, true) if self.parent_is_trait_impl => { @@ -1039,7 +1034,6 @@ impl DocFolder for Cache { ((None, None), false) } clean::AssociatedTypeItem(..) | - clean::AssociatedConstItem(..) | clean::TyMethodItem(..) | clean::StructFieldItem(..) | clean::VariantItem(..) => { @@ -1047,7 +1041,7 @@ impl DocFolder for Cache { Some(&self.stack[..self.stack.len() - 1])), false) } - clean::MethodItem(..) => { + clean::MethodItem(..) | clean::AssociatedConstItem(..) => { if self.parent_stack.is_empty() { ((None, None), false) } else { @@ -1060,6 +1054,7 @@ impl DocFolder for Cache { // information if present. Some(&(ref fqp, ItemType::Trait)) | Some(&(ref fqp, ItemType::Struct)) | + Some(&(ref fqp, ItemType::Union)) | Some(&(ref fqp, ItemType::Enum)) => Some(&fqp[..fqp.len() - 1]), Some(..) => Some(&*self.stack), @@ -1072,7 +1067,7 @@ impl DocFolder for Cache { }; match parent { - (parent, Some(path)) if is_method || (!self.stripped_mod) => { + (parent, Some(path)) if is_inherent_impl_item || (!self.stripped_mod) => { debug_assert!(!item.is_stripped()); // A crate has a module at its root, containing all items, @@ -1080,7 +1075,7 @@ impl DocFolder for Cache { // inserted later on when serializing the search-index. if item.def_id.index != CRATE_DEF_INDEX { self.search_index.push(IndexItem { - ty: shortty(&item), + ty: item_type(&item), name: s.to_string(), path: path.join("::").to_string(), desc: Escape(&shorter(item.doc_value())).to_string(), @@ -1090,10 +1085,10 @@ impl DocFolder for Cache { }); } } - (Some(parent), None) if is_method => { + (Some(parent), None) if is_inherent_impl_item => { // We have a parent, but we don't know where they're // defined yet. Wait for later to index this item. - self.orphan_methods.push((parent, item.clone())); + self.orphan_impl_items.push((parent, item.clone())); } _ => {} } @@ -1113,7 +1108,8 @@ impl DocFolder for Cache { clean::TypedefItem(..) | clean::TraitItem(..) | clean::FunctionItem(..) | clean::ModuleItem(..) | clean::ForeignFunctionItem(..) | clean::ForeignStaticItem(..) | - clean::ConstantItem(..) | clean::StaticItem(..) + clean::ConstantItem(..) | clean::StaticItem(..) | + clean::UnionItem(..) if !self.stripped_mod => { // Reexported items mean that the same id can show up twice // in the rustdoc ast that we're looking at. We know, @@ -1126,7 +1122,7 @@ impl DocFolder for Cache { self.access_levels.is_public(item.def_id) { self.paths.insert(item.def_id, - (self.stack.clone(), shortty(&item))); + (self.stack.clone(), item_type(&item))); } } // link variants to their parent enum because pages aren't emitted @@ -1139,7 +1135,7 @@ impl DocFolder for Cache { clean::PrimitiveItem(..) if item.visibility.is_some() => { self.paths.insert(item.def_id, (self.stack.clone(), - shortty(&item))); + item_type(&item))); } _ => {} @@ -1148,7 +1144,8 @@ impl DocFolder for Cache { // Maintain the parent stack let orig_parent_is_trait_impl = self.parent_is_trait_impl; let parent_pushed = match item.inner { - clean::TraitItem(..) | clean::EnumItem(..) | clean::StructItem(..) => { + clean::TraitItem(..) | clean::EnumItem(..) | + clean::StructItem(..) | clean::UnionItem(..) => { self.parent_stack.push(item.def_id); self.parent_is_trait_impl = false; true @@ -1283,74 +1280,77 @@ impl Context { Ok(()) } - /// Non-parallelized version of rendering an item. This will take the input - /// item, render its contents, and then invoke the specified closure with - /// all sub-items which need to be rendered. 
- /// - /// The rendering driver uses this closure to queue up more work. - fn item(&mut self, item: clean::Item, mut f: F) -> Result<(), Error> where - F: FnMut(&mut Context, clean::Item), - { - fn render(writer: &mut io::Write, cx: &Context, it: &clean::Item, - pushname: bool) -> io::Result<()> { - // A little unfortunate that this is done like this, but it sure - // does make formatting *a lot* nicer. - CURRENT_LOCATION_KEY.with(|slot| { - *slot.borrow_mut() = cx.current.clone(); - }); + fn render_item(&self, + writer: &mut io::Write, + it: &clean::Item, + pushname: bool) + -> io::Result<()> { + // A little unfortunate that this is done like this, but it sure + // does make formatting *a lot* nicer. + CURRENT_LOCATION_KEY.with(|slot| { + *slot.borrow_mut() = self.current.clone(); + }); - let mut title = if it.is_primitive() { - // No need to include the namespace for primitive types - String::new() - } else { - cx.current.join("::") - }; - if pushname { - if !title.is_empty() { - title.push_str("::"); - } - title.push_str(it.name.as_ref().unwrap()); + let mut title = if it.is_primitive() { + // No need to include the namespace for primitive types + String::new() + } else { + self.current.join("::") + }; + if pushname { + if !title.is_empty() { + title.push_str("::"); } - title.push_str(" - Rust"); - let tyname = shortty(it).to_static_str(); - let desc = if it.is_crate() { - format!("API documentation for the Rust `{}` crate.", - cx.shared.layout.krate) - } else { - format!("API documentation for the Rust `{}` {} in crate `{}`.", - it.name.as_ref().unwrap(), tyname, cx.shared.layout.krate) - }; - let keywords = make_item_keywords(it); - let page = layout::Page { - ty: tyname, - root_path: &cx.root_path, - title: &title, - description: &desc, - keywords: &keywords, - }; + title.push_str(it.name.as_ref().unwrap()); + } + title.push_str(" - Rust"); + let tyname = item_type(it).css_class(); + let desc = if it.is_crate() { + format!("API documentation for the Rust `{}` crate.", + self.shared.layout.krate) + } else { + format!("API documentation for the Rust `{}` {} in crate `{}`.", + it.name.as_ref().unwrap(), tyname, self.shared.layout.krate) + }; + let keywords = make_item_keywords(it); + let page = layout::Page { + css_class: tyname, + root_path: &self.root_path, + title: &title, + description: &desc, + keywords: &keywords, + }; - reset_ids(true); + reset_ids(true); - if !cx.render_redirect_pages { - layout::render(writer, &cx.shared.layout, &page, - &Sidebar{ cx: cx, item: it }, - &Item{ cx: cx, item: it }, - cx.shared.css_file_extension.is_some())?; - } else { - let mut url = repeat("../").take(cx.current.len()) - .collect::(); - if let Some(&(ref names, ty)) = cache().paths.get(&it.def_id) { - for name in &names[..names.len() - 1] { - url.push_str(name); - url.push_str("/"); - } - url.push_str(&item_path(ty, names.last().unwrap())); - layout::redirect(writer, &url)?; + if !self.render_redirect_pages { + layout::render(writer, &self.shared.layout, &page, + &Sidebar{ cx: self, item: it }, + &Item{ cx: self, item: it }, + self.shared.css_file_extension.is_some())?; + } else { + let mut url = repeat("../").take(self.current.len()) + .collect::(); + if let Some(&(ref names, ty)) = cache().paths.get(&it.def_id) { + for name in &names[..names.len() - 1] { + url.push_str(name); + url.push_str("/"); } + url.push_str(&item_path(ty, names.last().unwrap())); + layout::redirect(writer, &url)?; } - Ok(()) } + Ok(()) + } + /// Non-parallelized version of rendering an item. 
This will take the input + /// item, render its contents, and then invoke the specified closure with + /// all sub-items which need to be rendered. + /// + /// The rendering driver uses this closure to queue up more work. + fn item(&mut self, item: clean::Item, mut f: F) -> Result<(), Error> where + F: FnMut(&mut Context, clean::Item), + { // Stripped modules survive the rustdoc passes (i.e. `strip-private`) // if they contain impls for public types. These modules can also // contain items such as publicly reexported structures. @@ -1359,7 +1359,7 @@ impl Context { // these modules are recursed into, but not rendered normally // (a flag on the context). if !self.render_redirect_pages { - self.render_redirect_pages = self.maybe_ignore_item(&item); + self.render_redirect_pages = maybe_ignore_item(&item); } if item.is_mod() { @@ -1371,7 +1371,7 @@ impl Context { let item = item.take().unwrap(); let mut buf = Vec::new(); - render(&mut buf, this, &item, false).unwrap(); + this.render_item(&mut buf, &item, false).unwrap(); // buf will be empty if the module is stripped and there is no redirect for it if !buf.is_empty() { let joint_dst = this.dst.join("index.html"); @@ -1386,7 +1386,7 @@ impl Context { _ => unreachable!() }; - // render sidebar-items.js used throughout this module + // Render sidebar-items.js used throughout this module. if !this.render_redirect_pages { let items = this.build_sidebar_items(&m); let js_dst = this.dst.join("sidebar-items.js"); @@ -1398,32 +1398,53 @@ impl Context { for item in m.items { f(this,item); } + Ok(()) - }) + })?; } else if item.name.is_some() { let mut buf = Vec::new(); - render(&mut buf, self, &item, true).unwrap(); + self.render_item(&mut buf, &item, true).unwrap(); // buf will be empty if the item is stripped and there is no redirect for it if !buf.is_empty() { - let joint_dst = self.dst.join(&item_path(shortty(&item), - item.name.as_ref().unwrap())); + let name = item.name.as_ref().unwrap(); + let item_type = item_type(&item); + let file_name = &item_path(item_type, name); + let joint_dst = self.dst.join(file_name); try_err!(fs::create_dir_all(&self.dst), &self.dst); let mut dst = try_err!(File::create(&joint_dst), &joint_dst); try_err!(dst.write_all(&buf), &joint_dst); + + // Redirect from a sane URL using the namespace to Rustdoc's + // URL for the page. + let redir_name = format!("{}.{}.html", name, item_type.name_space()); + let redir_dst = self.dst.join(redir_name); + if let Ok(mut redirect_out) = OpenOptions::new().create_new(true) + .write(true) + .open(&redir_dst) { + try_err!(layout::redirect(&mut redirect_out, file_name), &redir_dst); + } + + // If the item is a macro, redirect from the old macro URL (with !) + // to the new one (without). + // FIXME(#35705) remove this redirect. 
+ if item_type == ItemType::Macro { + let redir_name = format!("{}.{}!.html", item_type, name); + let redir_dst = self.dst.join(redir_name); + let mut redirect_out = try_err!(File::create(&redir_dst), &redir_dst); + try_err!(layout::redirect(&mut redirect_out, file_name), &redir_dst); + } } - Ok(()) - } else { - Ok(()) } + Ok(()) } fn build_sidebar_items(&self, m: &clean::Module) -> BTreeMap> { // BTreeMap instead of HashMap to get a sorted output let mut map = BTreeMap::new(); for item in &m.items { - if self.maybe_ignore_item(item) { continue } + if maybe_ignore_item(item) { continue } - let short = shortty(item).to_static_str(); + let short = item_type(item).css_class(); let myname = match item.name { None => continue, Some(ref s) => s.to_string(), @@ -1438,17 +1459,6 @@ impl Context { } return map; } - - fn maybe_ignore_item(&self, it: &clean::Item) -> bool { - match it.inner { - clean::StrippedItem(..) => true, - clean::ModuleItem(ref m) => { - it.doc_value().is_none() && m.items.is_empty() - && it.visibility != Some(clean::Public) - }, - _ => false, - } - } } impl<'a> Item<'a> { @@ -1531,7 +1541,7 @@ impl<'a> Item<'a> { } Some(format!("{path}{file}?gotosrc={goto}", path = path, - file = item_path(shortty(self.item), external_path.last().unwrap()), + file = item_path(item_type(self.item), external_path.last().unwrap()), goto = self.item.def_id.index.as_usize())) } } @@ -1548,12 +1558,22 @@ impl<'a> fmt::Display for Item<'a> { } else { write!(fmt, "Module ")?; }, - clean::FunctionItem(..) => write!(fmt, "Function ")?, + clean::FunctionItem(..) | clean::ForeignFunctionItem(..) => + write!(fmt, "Function ")?, clean::TraitItem(..) => write!(fmt, "Trait ")?, clean::StructItem(..) => write!(fmt, "Struct ")?, + clean::UnionItem(..) => write!(fmt, "Union ")?, clean::EnumItem(..) => write!(fmt, "Enum ")?, + clean::TypedefItem(..) => write!(fmt, "Type Definition ")?, + clean::MacroItem(..) => write!(fmt, "Macro ")?, clean::PrimitiveItem(..) => write!(fmt, "Primitive Type ")?, - _ => {} + clean::StaticItem(..) | clean::ForeignStaticItem(..) => + write!(fmt, "Static ")?, + clean::ConstantItem(..) => write!(fmt, "Constant ")?, + _ => { + // We don't generate pages for any other type. + unreachable!(); + } } if !self.item.is_primitive() { let cur = &self.cx.current; @@ -1566,7 +1586,7 @@ impl<'a> fmt::Display for Item<'a> { } } write!(fmt, "{}", - shortty(self.item), self.item.name.as_ref().unwrap())?; + item_type(self.item), self.item.name.as_ref().unwrap())?; write!(fmt, "")?; // in-band write!(fmt, "")?; @@ -1607,6 +1627,7 @@ impl<'a> fmt::Display for Item<'a> { item_function(fmt, self.cx, self.item, f), clean::TraitItem(ref t) => item_trait(fmt, self.cx, self.item, t), clean::StructItem(ref s) => item_struct(fmt, self.cx, self.item, s), + clean::UnionItem(ref s) => item_union(fmt, self.cx, self.item, s), clean::EnumItem(ref e) => item_enum(fmt, self.cx, self.item, e), clean::TypedefItem(ref t, _) => item_typedef(fmt, self.cx, self.item, t), clean::MacroItem(ref m) => item_macro(fmt, self.cx, self.item, m), @@ -1614,7 +1635,10 @@ impl<'a> fmt::Display for Item<'a> { clean::StaticItem(ref i) | clean::ForeignStaticItem(ref i) => item_static(fmt, self.cx, self.item, i), clean::ConstantItem(ref c) => item_constant(fmt, self.cx, self.item, c), - _ => Ok(()) + _ => { + // We don't generate pages for any other type. 
+ unreachable!(); + } } } } @@ -1622,7 +1646,7 @@ impl<'a> fmt::Display for Item<'a> { fn item_path(ty: ItemType, name: &str) -> String { match ty { ItemType::Module => format!("{}/index.html", name), - _ => format!("{}.{}.html", ty.to_static_str(), name), + _ => format!("{}.{}.html", ty.css_class(), name), } } @@ -1691,7 +1715,7 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context, if let clean::DefaultImplItem(..) = items[*i].inner { return false; } - !cx.maybe_ignore_item(&items[*i]) + !maybe_ignore_item(&items[*i]) }).collect::>(); // the order of item types in the listing @@ -1709,13 +1733,14 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context, ItemType::Trait => 9, ItemType::Function => 10, ItemType::Typedef => 12, - _ => 13 + ty as u8, + ItemType::Union => 13, + _ => 14 + ty as u8, } } fn cmp(i1: &clean::Item, i2: &clean::Item, idx1: usize, idx2: usize) -> Ordering { - let ty1 = shortty(i1); - let ty2 = shortty(i2); + let ty1 = item_type(i1); + let ty2 = item_type(i2); if ty1 != ty2 { return (reorder(ty1), idx1).cmp(&(reorder(ty2), idx2)) } @@ -1739,7 +1764,7 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context, continue; } - let myty = Some(shortty(myitem)); + let myty = Some(item_type(myitem)); if curty == Some(ItemType::ExternCrate) && myty == Some(ItemType::Import) { // Put `extern crate` and `use` re-exports in the same section. curty = myty; @@ -1753,6 +1778,7 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context, ItemType::Import => ("reexports", "Reexports"), ItemType::Module => ("modules", "Modules"), ItemType::Struct => ("structs", "Structs"), + ItemType::Union => ("unions", "Unions"), ItemType::Enum => ("enums", "Enums"), ItemType::Function => ("functions", "Functions"), ItemType::Typedef => ("types", "Type Definitions"), @@ -1818,16 +1844,16 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context, {name} - + {stab_docs} {docs} ", name = *myitem.name.as_ref().unwrap(), stab_docs = stab_docs, docs = shorter(Some(&Markdown(doc_value).to_string())), - class = shortty(myitem), + class = item_type(myitem), stab = myitem.stability_class(), - href = item_path(shortty(myitem), myitem.name.as_ref().unwrap()), + href = item_path(item_type(myitem), myitem.name.as_ref().unwrap()), title = full_path(cx, myitem))?; } } @@ -1839,6 +1865,17 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context, Ok(()) } +fn maybe_ignore_item(it: &clean::Item) -> bool { + match it.inner { + clean::StrippedItem(..) => true, + clean::ModuleItem(ref m) => { + it.doc_value().is_none() && m.items.is_empty() + && it.visibility != Some(clean::Public) + }, + _ => false, + } +} + fn short_stability(item: &clean::Item, cx: &Context, show_reason: bool) -> Vec { let mut stability = vec![]; @@ -1867,7 +1904,7 @@ fn short_stability(item: &clean::Item, cx: &Context, show_reason: bool) -> Vec 0 => format!(" (#{})", Escape(&tracker_url), issue_no, issue_no), - (true, _, _) => + (true, ..) 
=> format!(" ({})", Escape(&stab.feature)), _ => String::new(), } @@ -1933,7 +1970,7 @@ fn item_static(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, fn item_function(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, f: &clean::Function) -> fmt::Result { // FIXME(#24111): remove when `const_fn` is stabilized - let vis_constness = match get_unstable_features_setting() { + let vis_constness = match UnstableFeatures::from_environment() { UnstableFeatures::Allow => f.constness, _ => hir::Constness::NotConst }; @@ -2022,14 +2059,18 @@ fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, fn trait_item(w: &mut fmt::Formatter, cx: &Context, m: &clean::Item, t: &clean::Item) -> fmt::Result { let name = m.name.as_ref().unwrap(); - let id = derive_id(format!("{}.{}", shortty(m), name)); - write!(w, "

", + let item_type = item_type(m); + let id = derive_id(format!("{}.{}", item_type, name)); + let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); + write!(w, "

\ +

")?; + write!(w, "

")?; document(w, cx, m)?; Ok(()) } @@ -2104,7 +2145,7 @@ fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, let (ref path, _) = cache.external_paths[&it.def_id]; path[..path.len() - 1].join("/") }, - ty = shortty(it).to_static_str(), + ty = item_type(it).css_class(), name = *it.name.as_ref().unwrap())?; Ok(()) } @@ -2113,7 +2154,7 @@ fn naive_assoc_href(it: &clean::Item, link: AssocItemLink) -> String { use html::item_type::ItemType::*; let name = it.name.as_ref().unwrap(); - let ty = match shortty(it) { + let ty = match item_type(it) { Typedef | AssociatedType => AssociatedType, s@_ => s, }; @@ -2191,7 +2232,7 @@ fn render_assoc_item(w: &mut fmt::Formatter, link: AssocItemLink) -> fmt::Result { let name = meth.name.as_ref().unwrap(); - let anchor = format!("#{}.{}", shortty(meth), name); + let anchor = format!("#{}.{}", item_type(meth), name); let href = match link { AssocItemLink::Anchor(Some(ref id)) => format!("#{}", id), AssocItemLink::Anchor(None) => anchor, @@ -2208,7 +2249,7 @@ fn render_assoc_item(w: &mut fmt::Formatter, } }; // FIXME(#24111): remove when `const_fn` is stabilized - let vis_constness = match get_unstable_features_setting() { + let vis_constness = match UnstableFeatures::from_environment() { UnstableFeatures::Allow => constness, _ => hir::Constness::NotConst }; @@ -2268,9 +2309,19 @@ fn item_struct(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, if fields.peek().is_some() { write!(w, "

Fields

")?; for (field, ty) in fields { - write!(w, "{name}: {ty} - ", - shortty = ItemType::StructField, + let id = derive_id(format!("{}.{}", + ItemType::StructField, + field.name.as_ref().unwrap())); + let ns_id = derive_id(format!("{}.{}", + field.name.as_ref().unwrap(), + ItemType::StructField.name_space())); + write!(w, " + ", + item_type = ItemType::StructField, + id = id, + ns_id = ns_id, stab = field.stability_class(), name = field.name.as_ref().unwrap(), ty = ty)?; @@ -2281,6 +2332,40 @@ fn item_struct(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All) } +fn item_union(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, + s: &clean::Union) -> fmt::Result { + write!(w, "
")?;
+    render_attributes(w, it)?;
+    render_union(w,
+                 it,
+                 Some(&s.generics),
+                 &s.fields,
+                 "",
+                 true)?;
+    write!(w, "
")?; + + document(w, cx, it)?; + let mut fields = s.fields.iter().filter_map(|f| { + match f.inner { + clean::StructFieldItem(ref ty) => Some((f, ty)), + _ => None, + } + }).peekable(); + if fields.peek().is_some() { + write!(w, "

Fields

")?; + for (field, ty) in fields { + write!(w, "{name}: {ty} + ", + shortty = ItemType::StructField, + stab = field.stability_class(), + name = field.name.as_ref().unwrap(), + ty = ty)?; + document(w, cx, field)?; + } + } + render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All) +} + fn item_enum(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, e: &clean::Enum) -> fmt::Result { write!(w, "
")?;
@@ -2339,8 +2424,16 @@ fn item_enum(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
     if !e.variants.is_empty() {
         write!(w, "

Variants

\n")?; for variant in &e.variants { - write!(w, "{name}", - shortty = ItemType::Variant, + let id = derive_id(format!("{}.{}", + ItemType::Variant, + variant.name.as_ref().unwrap())); + let ns_id = derive_id(format!("{}.{}", + variant.name.as_ref().unwrap(), + ItemType::Variant.name_space())); + write!(w, "\ + ")?; + write!(w, "")?; document(w, cx, variant)?; use clean::{Variant, StructVariant}; @@ -2364,10 +2457,20 @@ fn item_enum(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, for field in &s.fields { use clean::StructFieldItem; if let StructFieldItem(ref ty) = field.inner { + let id = derive_id(format!("variant.{}.field.{}", + variant.name.as_ref().unwrap(), + field.name.as_ref().unwrap())); + let ns_id = derive_id(format!("{}.{}.{}.{}", + variant.name.as_ref().unwrap(), + ItemType::Variant.name_space(), + field.name.as_ref().unwrap(), + ItemType::StructField.name_space())); write!(w, "\ - {f}: {t}", - v = variant.name.as_ref().unwrap(), + id='{id}'>\ + ", + id = id, + ns_id = ns_id, f = field.name.as_ref().unwrap(), t = *ty)?; document(w, cx, field)?; @@ -2416,23 +2519,32 @@ fn render_struct(w: &mut fmt::Formatter, it: &clean::Item, if let Some(g) = g { write!(w, "{}", WhereClause(g))? } - write!(w, " {{\n{}", tab)?; + let mut has_visible_fields = false; + write!(w, " {{")?; for field in fields { if let clean::StructFieldItem(ref ty) = field.inner { - write!(w, " {}{}: {},\n{}", + write!(w, "\n{} {}{}: {},", + tab, VisSpace(&field.visibility), field.name.as_ref().unwrap(), - *ty, - tab)?; + *ty)?; + has_visible_fields = true; } } - if it.has_stripped_fields().unwrap() { - write!(w, " // some fields omitted\n{}", tab)?; + if has_visible_fields { + if it.has_stripped_fields().unwrap() { + write!(w, "\n{} // some fields omitted", tab)?; + } + write!(w, "\n{}", tab)?; + } else if it.has_stripped_fields().unwrap() { + // If there are no visible fields we can just display + // `{ /* fields omitted */ }` to save space. 
+ write!(w, " /* fields omitted */ ")?; } write!(w, "}}")?; } - doctree::Tuple | doctree::Newtype => { + doctree::Tuple => { write!(w, "(")?; for (i, field) in fields.iter().enumerate() { if i > 0 { @@ -2465,10 +2577,42 @@ fn render_struct(w: &mut fmt::Formatter, it: &clean::Item, Ok(()) } +fn render_union(w: &mut fmt::Formatter, it: &clean::Item, + g: Option<&clean::Generics>, + fields: &[clean::Item], + tab: &str, + structhead: bool) -> fmt::Result { + write!(w, "{}{}{}", + VisSpace(&it.visibility), + if structhead {"union "} else {""}, + it.name.as_ref().unwrap())?; + if let Some(g) = g { + write!(w, "{}", g)?; + write!(w, "{}", WhereClause(g))?; + } + + write!(w, " {{\n{}", tab)?; + for field in fields { + if let clean::StructFieldItem(ref ty) = field.inner { + write!(w, " {}{}: {},\n{}", + VisSpace(&field.visibility), + field.name.as_ref().unwrap(), + *ty, + tab)?; + } + } + + if it.has_stripped_fields().unwrap() { + write!(w, " // some fields omitted\n{}", tab)?; + } + write!(w, "}}")?; + Ok(()) +} + #[derive(Copy, Clone)] enum AssocItemLink<'a> { Anchor(Option<&'a str>), - GotoSource(DefId, &'a HashSet), + GotoSource(DefId, &'a FnvHashSet), } impl<'a> AssocItemLink<'a> { @@ -2482,7 +2626,13 @@ impl<'a> AssocItemLink<'a> { enum AssocItemRender<'a> { All, - DerefFor { trait_: &'a clean::Type, type_: &'a clean::Type }, + DerefFor { trait_: &'a clean::Type, type_: &'a clean::Type, deref_mut_: bool } +} + +#[derive(Copy, Clone, PartialEq)] +enum RenderMode { + Normal, + ForDeref { mut_: bool }, } fn render_assoc_items(w: &mut fmt::Formatter, @@ -2499,19 +2649,19 @@ fn render_assoc_items(w: &mut fmt::Formatter, i.inner_impl().trait_.is_none() }); if !non_trait.is_empty() { - let render_header = match what { + let render_mode = match what { AssocItemRender::All => { write!(w, "

Methods

")?; - true + RenderMode::Normal } - AssocItemRender::DerefFor { trait_, type_ } => { + AssocItemRender::DerefFor { trait_, type_, deref_mut_ } => { write!(w, "

Methods from \ {}<Target={}>

", trait_, type_)?; - false + RenderMode::ForDeref { mut_: deref_mut_ } } }; for i in &non_trait { - render_impl(w, cx, i, AssocItemLink::Anchor(None), render_header, + render_impl(w, cx, i, AssocItemLink::Anchor(None), render_mode, containing_item.stable_since())?; } } @@ -2523,21 +2673,25 @@ fn render_assoc_items(w: &mut fmt::Formatter, t.inner_impl().trait_.def_id() == c.deref_trait_did }); if let Some(impl_) = deref_impl { - render_deref_methods(w, cx, impl_, containing_item)?; + let has_deref_mut = traits.iter().find(|t| { + t.inner_impl().trait_.def_id() == c.deref_mut_trait_did + }).is_some(); + render_deref_methods(w, cx, impl_, containing_item, has_deref_mut)?; } write!(w, "

Trait \ Implementations

")?; for i in &traits { let did = i.trait_did().unwrap(); let assoc_link = AssocItemLink::GotoSource(did, &i.inner_impl().provided_trait_methods); - render_impl(w, cx, i, assoc_link, true, containing_item.stable_since())?; + render_impl(w, cx, i, assoc_link, + RenderMode::Normal, containing_item.stable_since())?; } } Ok(()) } fn render_deref_methods(w: &mut fmt::Formatter, cx: &Context, impl_: &Impl, - container_item: &clean::Item) -> fmt::Result { + container_item: &clean::Item, deref_mut: bool) -> fmt::Result { let deref_type = impl_.inner_impl().trait_.as_ref().unwrap(); let target = impl_.inner_impl().items.iter().filter_map(|item| { match item.inner { @@ -2545,7 +2699,8 @@ fn render_deref_methods(w: &mut fmt::Formatter, cx: &Context, impl_: &Impl, _ => None, } }).next().expect("Expected associated type binding"); - let what = AssocItemRender::DerefFor { trait_: deref_type, type_: target }; + let what = AssocItemRender::DerefFor { trait_: deref_type, type_: target, + deref_mut_: deref_mut }; if let Some(did) = target.def_id() { render_assoc_items(w, cx, container_item, did, what) } else { @@ -2559,12 +2714,9 @@ fn render_deref_methods(w: &mut fmt::Formatter, cx: &Context, impl_: &Impl, } } -// Render_header is false when we are rendering a `Deref` impl and true -// otherwise. If render_header is false, we will avoid rendering static -// methods, since they are not accessible for the type implementing `Deref` fn render_impl(w: &mut fmt::Formatter, cx: &Context, i: &Impl, link: AssocItemLink, - render_header: bool, outer_version: Option<&str>) -> fmt::Result { - if render_header { + render_mode: RenderMode, outer_version: Option<&str>) -> fmt::Result { + if render_mode == RenderMode::Normal { write!(w, "

{}", i.inner_impl())?; write!(w, "")?; let since = i.impl_item.stability.as_ref().map(|s| &s.since[..]); @@ -2584,61 +2736,92 @@ fn render_impl(w: &mut fmt::Formatter, cx: &Context, i: &Impl, link: AssocItemLi } } - fn doctraititem(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item, - link: AssocItemLink, render_static: bool, - is_default_item: bool, outer_version: Option<&str>, - trait_: Option<&clean::Trait>) -> fmt::Result { - let shortty = shortty(item); + fn doc_impl_item(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item, + link: AssocItemLink, render_mode: RenderMode, + is_default_item: bool, outer_version: Option<&str>, + trait_: Option<&clean::Trait>) -> fmt::Result { + let item_type = item_type(item); let name = item.name.as_ref().unwrap(); - let is_static = match item.inner { - clean::MethodItem(ref method) => !method.decl.has_self(), - clean::TyMethodItem(ref method) => !method.decl.has_self(), - _ => false + let render_method_item: bool = match render_mode { + RenderMode::Normal => true, + RenderMode::ForDeref { mut_: deref_mut_ } => { + let self_type_opt = match item.inner { + clean::MethodItem(ref method) => method.decl.self_type(), + clean::TyMethodItem(ref method) => method.decl.self_type(), + _ => None + }; + + if let Some(self_ty) = self_type_opt { + let by_mut_ref = match self_ty { + SelfTy::SelfBorrowed(_lifetime, mutability) => { + mutability == Mutability::Mutable + }, + SelfTy::SelfExplicit(clean::BorrowedRef { mutability, .. }) => { + mutability == Mutability::Mutable + }, + _ => false, + }; + + deref_mut_ || !by_mut_ref + } else { + false + } + }, }; match item.inner { clean::MethodItem(..) | clean::TyMethodItem(..) => { // Only render when the method is not static or we allow static methods - if !is_static || render_static { - let id = derive_id(format!("{}.{}", shortty, name)); - write!(w, "

", id, shortty)?; + if render_method_item { + let id = derive_id(format!("{}.{}", item_type, name)); + let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); + write!(w, "

", id, item_type)?; + write!(w, "

\n")?; + write!(w, "

\n")?; } } clean::TypedefItem(ref tydef, _) => { let id = derive_id(format!("{}.{}", ItemType::AssociatedType, name)); - write!(w, "

", id, shortty)?; + let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); + write!(w, "

", id, item_type)?; + write!(w, "

\n")?; + write!(w, "

\n")?; } clean::AssociatedConstItem(ref ty, ref default) => { - let id = derive_id(format!("{}.{}", shortty, name)); - write!(w, "

", id, shortty)?; + let id = derive_id(format!("{}.{}", item_type, name)); + let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); + write!(w, "

", id, item_type)?; + write!(w, "

\n")?; + write!(w, "

\n")?; } clean::ConstantItem(ref c) => { - let id = derive_id(format!("{}.{}", shortty, name)); - write!(w, "

", id, shortty)?; + let id = derive_id(format!("{}.{}", item_type, name)); + let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); + write!(w, "

", id, item_type)?; + write!(w, "

\n")?; + write!(w, "

\n")?; } clean::AssociatedTypeItem(ref bounds, ref default) => { - let id = derive_id(format!("{}.{}", shortty, name)); - write!(w, "

", id, shortty)?; + let id = derive_id(format!("{}.{}", item_type, name)); + let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); + write!(w, "

", id, item_type)?; + write!(w, "

\n")?; + write!(w, "

\n")?; } clean::StrippedItem(..) => return Ok(()), _ => panic!("can't make docs for trait item with name {:?}", item.name) } - if !is_static || render_static { + if render_method_item || render_mode == RenderMode::Normal { if !is_default_item { if let Some(t) = trait_ { // The trait item may have been stripped so we might not @@ -2671,15 +2854,15 @@ fn render_impl(w: &mut fmt::Formatter, cx: &Context, i: &Impl, link: AssocItemLi write!(w, "
")?; for trait_item in &i.inner_impl().items { - doctraititem(w, cx, trait_item, link, render_header, - false, outer_version, trait_)?; + doc_impl_item(w, cx, trait_item, link, render_mode, + false, outer_version, trait_)?; } fn render_default_items(w: &mut fmt::Formatter, cx: &Context, t: &clean::Trait, i: &clean::Impl, - render_static: bool, + render_mode: RenderMode, outer_version: Option<&str>) -> fmt::Result { for trait_item in &t.items { let n = trait_item.name.clone(); @@ -2689,8 +2872,8 @@ fn render_impl(w: &mut fmt::Formatter, cx: &Context, i: &Impl, link: AssocItemLi let did = i.trait_.as_ref().unwrap().def_id().unwrap(); let assoc_link = AssocItemLink::GotoSource(did, &i.provided_trait_methods); - doctraititem(w, cx, trait_item, assoc_link, render_static, true, - outer_version, None)?; + doc_impl_item(w, cx, trait_item, assoc_link, render_mode, true, + outer_version, None)?; } Ok(()) } @@ -2698,7 +2881,7 @@ fn render_impl(w: &mut fmt::Formatter, cx: &Context, i: &Impl, link: AssocItemLi // If we've implemented a trait, then also emit documentation for all // default items which weren't overridden in the implementation block. if let Some(t) = trait_ { - render_default_items(w, cx, t, &i.inner_impl(), render_header, outer_version)?; + render_default_items(w, cx, t, &i.inner_impl(), render_mode, outer_version)?; } write!(w, "
")?; Ok(()) @@ -2749,7 +2932,7 @@ impl<'a> fmt::Display for Sidebar<'a> { relpath: '{path}'\ }};", name = it.name.as_ref().map(|x| &x[..]).unwrap_or(""), - ty = shortty(it).to_static_str(), + ty = item_type(it).css_class(), path = relpath)?; if parentlen == 0 { // there is no sidebar-items.js beyond the crate root path @@ -2778,7 +2961,7 @@ impl<'a> fmt::Display for Source<'a> { write!(fmt, "{0:1$}\n", i, cols)?; } write!(fmt, "
")?; - write!(fmt, "{}", highlight::render_with_highlighting(s, None, None))?; + write!(fmt, "{}", highlight::render_with_highlighting(s, None, None, None))?; Ok(()) } } @@ -2787,6 +2970,7 @@ fn item_macro(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, t: &clean::Macro) -> fmt::Result { w.write_str(&highlight::render_with_highlighting(&t.source, Some("macro"), + None, None))?; render_stability_since_raw(w, it.stable_since(), None)?; document(w, cx, it) diff --git a/src/librustdoc/html/static/jquery-2.1.4.min.js b/src/librustdoc/html/static/jquery-2.1.4.min.js deleted file mode 100644 index 49990d6e14..0000000000 --- a/src/librustdoc/html/static/jquery-2.1.4.min.js +++ /dev/null @@ -1,4 +0,0 @@ -/*! jQuery v2.1.4 | (c) 2005, 2015 jQuery Foundation, Inc. | jquery.org/license */ -!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k={},l=a.document,m="2.1.4",n=function(a,b){return new n.fn.init(a,b)},o=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,p=/^-ms-/,q=/-([\da-z])/gi,r=function(a,b){return b.toUpperCase()};n.fn=n.prototype={jquery:m,constructor:n,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=n.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return n.each(this,a,b)},map:function(a){return this.pushStack(n.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},n.extend=n.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||n.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(a=arguments[h]))for(b in a)c=g[b],d=a[b],g!==d&&(j&&d&&(n.isPlainObject(d)||(e=n.isArray(d)))?(e?(e=!1,f=c&&n.isArray(c)?c:[]):f=c&&n.isPlainObject(c)?c:{},g[b]=n.extend(j,f,d)):void 0!==d&&(g[b]=d));return g},n.extend({expando:"jQuery"+(m+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===n.type(a)},isArray:Array.isArray,isWindow:function(a){return null!=a&&a===a.window},isNumeric:function(a){return!n.isArray(a)&&a-parseFloat(a)+1>=0},isPlainObject:function(a){return"object"!==n.type(a)||a.nodeType||n.isWindow(a)?!1:a.constructor&&!j.call(a.constructor.prototype,"isPrototypeOf")?!1:!0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(a){var b,c=eval;a=n.trim(a),a&&(1===a.indexOf("use strict")?(b=l.createElement("script"),b.text=a,l.head.appendChild(b).parentNode.removeChild(b)):c(a))},camelCase:function(a){return a.replace(p,"ms-").replace(q,r)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var 
d,e=0,f=a.length,g=s(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(o,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(s(Object(a))?n.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){return null==b?-1:g.call(b,a,c)},merge:function(a,b){for(var c=+b.length,d=0,e=a.length;c>d;d++)a[e++]=b[d];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=s(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(c=a[b],b=a,a=c),n.isFunction(a)?(e=d.call(arguments,2),f=function(){return a.apply(b||this,e.concat(d.call(arguments)))},f.guid=a.guid=a.guid||n.guid++,f):void 0},now:Date.now,support:k}),n.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function s(a){var b="length"in a&&a.length,c=n.type(a);return"function"===c||n.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var t=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=ha(),z=ha(),A=ha(),B=function(a,b){return a===b&&(l=!0),0},C=1<<31,D={}.hasOwnProperty,E=[],F=E.pop,G=E.push,H=E.push,I=E.slice,J=function(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return c;return-1},K="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",L="[\\x20\\t\\r\\n\\f]",M="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",N=M.replace("w","w#"),O="\\["+L+"*("+M+")(?:"+L+"*([*^$|!~]?=)"+L+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+N+"))|)"+L+"*\\]",P=":("+M+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+O+")*)|.*)\\)|)",Q=new RegExp(L+"+","g"),R=new RegExp("^"+L+"+|((?:^|[^\\\\])(?:\\\\.)*)"+L+"+$","g"),S=new RegExp("^"+L+"*,"+L+"*"),T=new RegExp("^"+L+"*([>+~]|"+L+")"+L+"*"),U=new RegExp("="+L+"*([^\\]'\"]*?)"+L+"*\\]","g"),V=new RegExp(P),W=new RegExp("^"+N+"$"),X={ID:new RegExp("^#("+M+")"),CLASS:new RegExp("^\\.("+M+")"),TAG:new RegExp("^("+M.replace("w","w*")+")"),ATTR:new RegExp("^"+O),PSEUDO:new RegExp("^"+P),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+L+"*(even|odd|(([+-]|)(\\d*)n|)"+L+"*(?:([+-]|)"+L+"*(\\d+)|))"+L+"*\\)|)","i"),bool:new RegExp("^(?:"+K+")$","i"),needsContext:new RegExp("^"+L+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+L+"*((?:-\\d)?\\d*)"+L+"*\\)|)(?=[^-]|$)","i")},Y=/^(?:input|select|textarea|button)$/i,Z=/^h\d$/i,$=/^[^{]+\{\s*\[native \w/,_=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,aa=/[+~]/,ba=/'|\\/g,ca=new RegExp("\\\\([\\da-f]{1,6}"+L+"?|("+L+")|.)","ig"),da=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},ea=function(){m()};try{H.apply(E=I.call(v.childNodes),v.childNodes),E[v.childNodes.length].nodeType}catch(fa){H={apply:E.length?function(a,b){G.apply(a,I.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function ga(a,b,d,e){var 
[minified jQuery 2.x source, including the Sizzle selector engine, shipped verbatim in this patch; the minified blob is garbled in this copy (its HTML string literals and several truncated expressions were lost) and is not reproduced here]